author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39
parent    Initial commit.
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r-- arch/powerpc/include/asm/8xx_immap.h | 564
-rw-r--r-- arch/powerpc/include/asm/Kbuild | 10
-rw-r--r-- arch/powerpc/include/asm/accounting.h | 32
-rw-r--r-- arch/powerpc/include/asm/agp.h | 19
-rw-r--r-- arch/powerpc/include/asm/archrandom.h | 16
-rw-r--r-- arch/powerpc/include/asm/asm-compat.h | 64
-rw-r--r-- arch/powerpc/include/asm/asm-const.h | 15
-rw-r--r-- arch/powerpc/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/powerpc/include/asm/asm-prototypes.h | 75
-rw-r--r-- arch/powerpc/include/asm/async_tx.h | 31
-rw-r--r-- arch/powerpc/include/asm/atomic.h | 489
-rw-r--r-- arch/powerpc/include/asm/backlight.h | 42
-rw-r--r-- arch/powerpc/include/asm/barrier.h | 119
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 346
-rw-r--r-- arch/powerpc/include/asm/book3s/32/kup.h | 190
-rw-r--r-- arch/powerpc/include/asm/book3s/32/mmu-hash.h | 236
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgalloc.h | 74
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 620
-rw-r--r-- arch/powerpc/include/asm/book3s/32/tlbflush.h | 82
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-4k.h | 172
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-64k.h | 296
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-pkey.h | 45
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash.h | 262
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hugetlb.h | 135
-rw-r--r-- arch/powerpc/include/asm/book3s/64/kexec.h | 28
-rw-r--r-- arch/powerpc/include/asm/book3s/64/kup.h | 427
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 885
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu.h | 295
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgalloc.h | 183
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 67
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 63
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 1444
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pkeys.h | 25
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix-4k.h | 22
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix-64k.h | 23
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix.h | 329
-rw-r--r-- arch/powerpc/include/asm/book3s/64/slice.h | 42
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 129
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 97
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush.h | 235
-rw-r--r-- arch/powerpc/include/asm/book3s/pgalloc.h | 15
-rw-r--r-- arch/powerpc/include/asm/book3s/pgtable.h | 49
-rw-r--r-- arch/powerpc/include/asm/book3s/tlbflush.h | 11
-rw-r--r-- arch/powerpc/include/asm/bootx.h | 51
-rw-r--r-- arch/powerpc/include/asm/bpf_perf_event.h | 9
-rw-r--r-- arch/powerpc/include/asm/btext.h | 34
-rw-r--r-- arch/powerpc/include/asm/bug.h | 175
-rw-r--r-- arch/powerpc/include/asm/cache.h | 146
-rw-r--r-- arch/powerpc/include/asm/cacheflush.h | 130
-rw-r--r-- arch/powerpc/include/asm/cell-pmu.h | 94
-rw-r--r-- arch/powerpc/include/asm/cell-regs.h | 327
-rw-r--r-- arch/powerpc/include/asm/checksum.h | 221
-rw-r--r-- arch/powerpc/include/asm/clocksource.h | 7
-rw-r--r-- arch/powerpc/include/asm/cmpxchg.h | 531
-rw-r--r-- arch/powerpc/include/asm/code-patching-asm.h | 18
-rw-r--r-- arch/powerpc/include/asm/code-patching.h | 239
-rw-r--r-- arch/powerpc/include/asm/compat.h | 114
-rw-r--r-- arch/powerpc/include/asm/context_tracking.h | 11
-rw-r--r-- arch/powerpc/include/asm/copro.h | 27
-rw-r--r-- arch/powerpc/include/asm/cpm.h | 1
-rw-r--r-- arch/powerpc/include/asm/cpm1.h | 612
-rw-r--r-- arch/powerpc/include/asm/cpm2.h | 1149
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 56
-rw-r--r-- arch/powerpc/include/asm/cpu_setup.h | 49
-rw-r--r-- arch/powerpc/include/asm/cpufeature.h | 36
-rw-r--r-- arch/powerpc/include/asm/cpuidle.h | 105
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 607
-rw-r--r-- arch/powerpc/include/asm/cputhreads.h | 115
-rw-r--r-- arch/powerpc/include/asm/cputime.h | 115
-rw-r--r-- arch/powerpc/include/asm/crashdump-ppc64.h | 19
-rw-r--r-- arch/powerpc/include/asm/current.h | 38
-rw-r--r-- arch/powerpc/include/asm/dbdma.h | 109
-rw-r--r-- arch/powerpc/include/asm/dbell.h | 161
-rw-r--r-- arch/powerpc/include/asm/dcr-generic.h | 36
-rw-r--r-- arch/powerpc/include/asm/dcr-mmio.h | 44
-rw-r--r-- arch/powerpc/include/asm/dcr-native.h | 144
-rw-r--r-- arch/powerpc/include/asm/dcr-regs.h | 184
-rw-r--r-- arch/powerpc/include/asm/dcr.h | 65
-rw-r--r-- arch/powerpc/include/asm/debug.h | 55
-rw-r--r-- arch/powerpc/include/asm/delay.h | 76
-rw-r--r-- arch/powerpc/include/asm/device.h | 58
-rw-r--r-- arch/powerpc/include/asm/disassemble.h | 117
-rw-r--r-- arch/powerpc/include/asm/dma-direct.h | 14
-rw-r--r-- arch/powerpc/include/asm/dma.h | 344
-rw-r--r-- arch/powerpc/include/asm/drmem.h | 125
-rw-r--r-- arch/powerpc/include/asm/dt_cpu_ftrs.h | 25
-rw-r--r-- arch/powerpc/include/asm/dtl.h | 44
-rw-r--r-- arch/powerpc/include/asm/edac.h | 40
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 461
-rw-r--r-- arch/powerpc/include/asm/eeh_event.h | 30
-rw-r--r-- arch/powerpc/include/asm/ehv_pic.h | 40
-rw-r--r-- arch/powerpc/include/asm/elf.h | 197
-rw-r--r-- arch/powerpc/include/asm/elfnote.h | 24
-rw-r--r-- arch/powerpc/include/asm/emergency-restart.h | 1
-rw-r--r-- arch/powerpc/include/asm/emulated_ops.h | 84
-rw-r--r-- arch/powerpc/include/asm/epapr_hcalls.h | 575
-rw-r--r-- arch/powerpc/include/asm/exception-64e.h | 172
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 178
-rw-r--r-- arch/powerpc/include/asm/exec.h | 10
-rw-r--r-- arch/powerpc/include/asm/extable.h | 44
-rw-r--r-- arch/powerpc/include/asm/fadump-internal.h | 166
-rw-r--r-- arch/powerpc/include/asm/fadump.h | 35
-rw-r--r-- arch/powerpc/include/asm/fb.h | 22
-rw-r--r-- arch/powerpc/include/asm/feature-fixups.h | 298
-rw-r--r-- arch/powerpc/include/asm/firmware.h | 149
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 123
-rw-r--r-- arch/powerpc/include/asm/floppy.h | 214
-rw-r--r-- arch/powerpc/include/asm/fs_pd.h | 49
-rw-r--r-- arch/powerpc/include/asm/fsl_gtm.h | 43
-rw-r--r-- arch/powerpc/include/asm/fsl_hcalls.h | 655
-rw-r--r-- arch/powerpc/include/asm/fsl_lbc.h | 296
-rw-r--r-- arch/powerpc/include/asm/fsl_pamu_stash.h | 21
-rw-r--r-- arch/powerpc/include/asm/fsl_pm.h | 47
-rw-r--r-- arch/powerpc/include/asm/ftrace.h | 111
-rw-r--r-- arch/powerpc/include/asm/futex.h | 101
-rw-r--r-- arch/powerpc/include/asm/grackle.h | 13
-rw-r--r-- arch/powerpc/include/asm/hardirq.h | 38
-rw-r--r-- arch/powerpc/include/asm/head-64.h | 172
-rw-r--r-- arch/powerpc/include/asm/heathrow.h | 68
-rw-r--r-- arch/powerpc/include/asm/highmem.h | 70
-rw-r--r-- arch/powerpc/include/asm/hmi.h | 37
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 87
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 672
-rw-r--r-- arch/powerpc/include/asm/hvconsole.h | 31
-rw-r--r-- arch/powerpc/include/asm/hvcserver.h | 46
-rw-r--r-- arch/powerpc/include/asm/hvsi.h | 95
-rw-r--r-- arch/powerpc/include/asm/hw_breakpoint.h | 127
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 521
-rw-r--r-- arch/powerpc/include/asm/hydra.h | 99
-rw-r--r-- arch/powerpc/include/asm/i8259.h | 13
-rw-r--r-- arch/powerpc/include/asm/ibmebus.h | 60
-rw-r--r-- arch/powerpc/include/asm/icswx.h | 204
-rw-r--r-- arch/powerpc/include/asm/ide.h | 18
-rw-r--r-- arch/powerpc/include/asm/idle.h | 93
-rw-r--r-- arch/powerpc/include/asm/imc-pmu.h | 172
-rw-r--r-- arch/powerpc/include/asm/immap_cpm2.h | 648
-rw-r--r-- arch/powerpc/include/asm/inst.h | 170
-rw-r--r-- arch/powerpc/include/asm/interrupt.h | 689
-rw-r--r-- arch/powerpc/include/asm/io-defs.h | 61
-rw-r--r-- arch/powerpc/include/asm/io-workarounds.h | 55
-rw-r--r-- arch/powerpc/include/asm/io.h | 1026
-rw-r--r-- arch/powerpc/include/asm/io_event_irq.h | 50
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 321
-rw-r--r-- arch/powerpc/include/asm/ipic.h | 76
-rw-r--r-- arch/powerpc/include/asm/irq.h | 61
-rw-r--r-- arch/powerpc/include/asm/irq_work.h | 11
-rw-r--r-- arch/powerpc/include/asm/irqflags.h | 74
-rw-r--r-- arch/powerpc/include/asm/isa-bridge.h | 30
-rw-r--r-- arch/powerpc/include/asm/jump_label.h | 57
-rw-r--r-- arch/powerpc/include/asm/kasan.h | 84
-rw-r--r-- arch/powerpc/include/asm/kdebug.h | 16
-rw-r--r-- arch/powerpc/include/asm/kdump.h | 47
-rw-r--r-- arch/powerpc/include/asm/kexec.h | 197
-rw-r--r-- arch/powerpc/include/asm/kexec_ranges.h | 25
-rw-r--r-- arch/powerpc/include/asm/keylargo.h | 262
-rw-r--r-- arch/powerpc/include/asm/kfence.h | 48
-rw-r--r-- arch/powerpc/include/asm/kgdb.h | 67
-rw-r--r-- arch/powerpc/include/asm/kprobes.h | 97
-rw-r--r-- arch/powerpc/include/asm/kup.h | 215
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 158
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 486
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_32.h | 36
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 682
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_asm.h | 160
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_uvmem.h | 100
-rw-r--r-- arch/powerpc/include/asm/kvm_booke.h | 108
-rw-r--r-- arch/powerpc/include/asm/kvm_booke_hv_asm.h | 68
-rw-r--r-- arch/powerpc/include/asm/kvm_fpu.h | 77
-rw-r--r-- arch/powerpc/include/asm/kvm_guest.h | 25
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 889
-rw-r--r-- arch/powerpc/include/asm/kvm_para.h | 43
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 1040
-rw-r--r-- arch/powerpc/include/asm/libata-portmap.h | 9
-rw-r--r-- arch/powerpc/include/asm/linkage.h | 16
-rw-r--r-- arch/powerpc/include/asm/livepatch.h | 23
-rw-r--r-- arch/powerpc/include/asm/local.h | 148
-rw-r--r-- arch/powerpc/include/asm/lppaca.h | 167
-rw-r--r-- arch/powerpc/include/asm/lv1call.h | 337
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 261
-rw-r--r-- arch/powerpc/include/asm/macio.h | 145
-rw-r--r-- arch/powerpc/include/asm/mc146818rtc.h | 32
-rw-r--r-- arch/powerpc/include/asm/mce.h | 268
-rw-r--r-- arch/powerpc/include/asm/mediabay.h | 49
-rw-r--r-- arch/powerpc/include/asm/mem_encrypt.h | 21
-rw-r--r-- arch/powerpc/include/asm/membarrier.h | 28
-rw-r--r-- arch/powerpc/include/asm/mman.h | 44
-rw-r--r-- arch/powerpc/include/asm/mmiowb.h | 18
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 421
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 303
-rw-r--r-- arch/powerpc/include/asm/mmzone.h | 55
-rw-r--r-- arch/powerpc/include/asm/module.h | 85
-rw-r--r-- arch/powerpc/include/asm/module.lds.h | 8
-rw-r--r-- arch/powerpc/include/asm/mpc5121.h | 120
-rw-r--r-- arch/powerpc/include/asm/mpc52xx.h | 365
-rw-r--r-- arch/powerpc/include/asm/mpc52xx_psc.h | 352
-rw-r--r-- arch/powerpc/include/asm/mpc5xxx.h | 24
-rw-r--r-- arch/powerpc/include/asm/mpc6xx.h | 7
-rw-r--r-- arch/powerpc/include/asm/mpc8260.h | 26
-rw-r--r-- arch/powerpc/include/asm/mpc85xx.h | 91
-rw-r--r-- arch/powerpc/include/asm/mpic.h | 497
-rw-r--r-- arch/powerpc/include/asm/mpic_msgr.h | 129
-rw-r--r-- arch/powerpc/include/asm/mpic_timer.h | 42
-rw-r--r-- arch/powerpc/include/asm/msi_bitmap.h | 31
-rw-r--r-- arch/powerpc/include/asm/nmi.h | 22
-rw-r--r-- arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 81
-rw-r--r-- arch/powerpc/include/asm/nohash/32/kup-8xx.h | 91
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-40x.h | 68
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-44x.h | 156
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 275
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgalloc.h | 35
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 371
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-40x.h | 85
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-44x.h | 128
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-85xx.h | 74
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-8xx.h | 193
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgalloc.h | 67
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 93
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 310
-rw-r--r-- arch/powerpc/include/asm/nohash/hugetlb-e500.h | 45
-rw-r--r-- arch/powerpc/include/asm/nohash/kup-booke.h | 110
-rw-r--r-- arch/powerpc/include/asm/nohash/mmu-e500.h | 324
-rw-r--r-- arch/powerpc/include/asm/nohash/mmu.h | 19
-rw-r--r-- arch/powerpc/include/asm/nohash/pgalloc.h | 72
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h | 279
-rw-r--r-- arch/powerpc/include/asm/nohash/pte-e500.h | 129
-rw-r--r-- arch/powerpc/include/asm/nohash/tlbflush.h | 77
-rw-r--r-- arch/powerpc/include/asm/nvram.h | 94
-rw-r--r-- arch/powerpc/include/asm/ohare.h | 55
-rw-r--r-- arch/powerpc/include/asm/opal-api.h | 1188
-rw-r--r-- arch/powerpc/include/asm/opal.h | 396
-rw-r--r-- arch/powerpc/include/asm/paca.h | 300
-rw-r--r-- arch/powerpc/include/asm/page.h | 334
-rw-r--r-- arch/powerpc/include/asm/page_32.h | 62
-rw-r--r-- arch/powerpc/include/asm/page_64.h | 106
-rw-r--r-- arch/powerpc/include/asm/paravirt.h | 165
-rw-r--r-- arch/powerpc/include/asm/paravirt_api_clock.h | 2
-rw-r--r-- arch/powerpc/include/asm/parport.h | 43
-rw-r--r-- arch/powerpc/include/asm/pasemi_dma.h | 526
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 296
-rw-r--r-- arch/powerpc/include/asm/pci.h | 120
-rw-r--r-- arch/powerpc/include/asm/percpu.h | 22
-rw-r--r-- arch/powerpc/include/asm/perf_event.h | 48
-rw-r--r-- arch/powerpc/include/asm/perf_event_fsl_emb.h | 46
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 184
-rw-r--r-- arch/powerpc/include/asm/pgalloc.h | 73
-rw-r--r-- arch/powerpc/include/asm/pgtable-be-types.h | 114
-rw-r--r-- arch/powerpc/include/asm/pgtable-types.h | 95
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 182
-rw-r--r-- arch/powerpc/include/asm/pkeys.h | 172
-rw-r--r-- arch/powerpc/include/asm/plpar_wrappers.h | 412
-rw-r--r-- arch/powerpc/include/asm/pmac_feature.h | 417
-rw-r--r-- arch/powerpc/include/asm/pmac_low_i2c.h | 98
-rw-r--r-- arch/powerpc/include/asm/pmac_pfunc.h | 254
-rw-r--r-- arch/powerpc/include/asm/pmc.h | 53
-rw-r--r-- arch/powerpc/include/asm/pmi.h | 53
-rw-r--r-- arch/powerpc/include/asm/pnv-ocxl.h | 85
-rw-r--r-- arch/powerpc/include/asm/pnv-pci.h | 78
-rw-r--r-- arch/powerpc/include/asm/powernv.h | 21
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 690
-rw-r--r-- arch/powerpc/include/asm/ppc-pci.h | 67
-rw-r--r-- arch/powerpc/include/asm/ppc4xx.h | 13
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 821
-rw-r--r-- arch/powerpc/include/asm/probes.h | 90
-rw-r--r-- arch/powerpc/include/asm/processor.h | 451
-rw-r--r-- arch/powerpc/include/asm/prom.h | 177
-rw-r--r-- arch/powerpc/include/asm/ps3.h | 521
-rw-r--r-- arch/powerpc/include/asm/ps3av.h | 729
-rw-r--r-- arch/powerpc/include/asm/ps3gpu.h | 74
-rw-r--r-- arch/powerpc/include/asm/ps3stor.h | 59
-rw-r--r-- arch/powerpc/include/asm/pte-walk.h | 88
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 398
-rw-r--r-- arch/powerpc/include/asm/qspinlock.h | 84
-rw-r--r-- arch/powerpc/include/asm/qspinlock_paravirt.h | 7
-rw-r--r-- arch/powerpc/include/asm/reg.h | 1466
-rw-r--r-- arch/powerpc/include/asm/reg_8xx.h | 83
-rw-r--r-- arch/powerpc/include/asm/reg_a2.h | 154
-rw-r--r-- arch/powerpc/include/asm/reg_booke.h | 700
-rw-r--r-- arch/powerpc/include/asm/reg_fsl_emb.h | 89
-rw-r--r-- arch/powerpc/include/asm/rheap.h | 92
-rw-r--r-- arch/powerpc/include/asm/rio.h | 18
-rw-r--r-- arch/powerpc/include/asm/rtas-types.h | 116
-rw-r--r-- arch/powerpc/include/asm/rtas.h | 382
-rw-r--r-- arch/powerpc/include/asm/runlatch.h | 44
-rw-r--r-- arch/powerpc/include/asm/seccomp.h | 34
-rw-r--r-- arch/powerpc/include/asm/sections.h | 75
-rw-r--r-- arch/powerpc/include/asm/secure_boot.h | 29
-rw-r--r-- arch/powerpc/include/asm/security_features.h | 113
-rw-r--r-- arch/powerpc/include/asm/secvar.h | 35
-rw-r--r-- arch/powerpc/include/asm/serial.h | 21
-rw-r--r-- arch/powerpc/include/asm/set_memory.h | 44
-rw-r--r-- arch/powerpc/include/asm/setjmp.h | 15
-rw-r--r-- arch/powerpc/include/asm/setup.h | 96
-rw-r--r-- arch/powerpc/include/asm/sfp-machine.h | 343
-rw-r--r-- arch/powerpc/include/asm/shmparam.h | 7
-rw-r--r-- arch/powerpc/include/asm/signal.h | 17
-rw-r--r-- arch/powerpc/include/asm/simple_spinlock.h | 266
-rw-r--r-- arch/powerpc/include/asm/simple_spinlock_types.h | 21
-rw-r--r-- arch/powerpc/include/asm/smp.h | 271
-rw-r--r-- arch/powerpc/include/asm/smu.h | 694
-rw-r--r-- arch/powerpc/include/asm/sparsemem.h | 30
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 21
-rw-r--r-- arch/powerpc/include/asm/spinlock_types.h | 16
-rw-r--r-- arch/powerpc/include/asm/spu.h | 679
-rw-r--r-- arch/powerpc/include/asm/spu_csa.h | 247
-rw-r--r-- arch/powerpc/include/asm/spu_info.h | 15
-rw-r--r-- arch/powerpc/include/asm/spu_priv1.h | 224
-rw-r--r-- arch/powerpc/include/asm/sstep.h | 181
-rw-r--r-- arch/powerpc/include/asm/stackprotector.h | 38
-rw-r--r-- arch/powerpc/include/asm/stacktrace.h | 13
-rw-r--r-- arch/powerpc/include/asm/static_call.h | 29
-rw-r--r-- arch/powerpc/include/asm/string.h | 85
-rw-r--r-- arch/powerpc/include/asm/svm.h | 33
-rw-r--r-- arch/powerpc/include/asm/swab.h | 9
-rw-r--r-- arch/powerpc/include/asm/swiotlb.h | 20
-rw-r--r-- arch/powerpc/include/asm/switch_to.h | 129
-rw-r--r-- arch/powerpc/include/asm/synch.h | 73
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 122
-rw-r--r-- arch/powerpc/include/asm/syscall_wrapper.h | 49
-rw-r--r-- arch/powerpc/include/asm/syscalls.h | 161
-rw-r--r-- arch/powerpc/include/asm/syscalls_32.h | 60
-rw-r--r-- arch/powerpc/include/asm/task_size_32.h | 21
-rw-r--r-- arch/powerpc/include/asm/task_size_64.h | 83
-rw-r--r-- arch/powerpc/include/asm/tce.h | 30
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 193
-rw-r--r-- arch/powerpc/include/asm/time.h | 124
-rw-r--r-- arch/powerpc/include/asm/timex.h | 25
-rw-r--r-- arch/powerpc/include/asm/tlb.h | 91
-rw-r--r-- arch/powerpc/include/asm/tlbflush.h | 11
-rw-r--r-- arch/powerpc/include/asm/tm.h | 22
-rw-r--r-- arch/powerpc/include/asm/topology.h | 147
-rw-r--r-- arch/powerpc/include/asm/trace.h | 243
-rw-r--r-- arch/powerpc/include/asm/trace_clock.h | 17
-rw-r--r-- arch/powerpc/include/asm/tsi108.h | 113
-rw-r--r-- arch/powerpc/include/asm/tsi108_irq.h | 110
-rw-r--r-- arch/powerpc/include/asm/tsi108_pci.h | 30
-rw-r--r-- arch/powerpc/include/asm/types.h | 20
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 471
-rw-r--r-- arch/powerpc/include/asm/udbg.h | 58
-rw-r--r-- arch/powerpc/include/asm/uic.h | 17
-rw-r--r-- arch/powerpc/include/asm/ultravisor-api.h | 39
-rw-r--r-- arch/powerpc/include/asm/ultravisor.h | 85
-rw-r--r-- arch/powerpc/include/asm/uninorth.h | 230
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 57
-rw-r--r-- arch/powerpc/include/asm/uprobes.h | 35
-rw-r--r-- arch/powerpc/include/asm/user.h | 47
-rw-r--r-- arch/powerpc/include/asm/vas.h | 294
-rw-r--r-- arch/powerpc/include/asm/vdso.h | 52
-rw-r--r-- arch/powerpc/include/asm/vdso/clocksource.h | 7
-rw-r--r-- arch/powerpc/include/asm/vdso/gettimeofday.h | 160
-rw-r--r-- arch/powerpc/include/asm/vdso/processor.h | 38
-rw-r--r-- arch/powerpc/include/asm/vdso/timebase.h | 73
-rw-r--r-- arch/powerpc/include/asm/vdso/vsyscall.h | 25
-rw-r--r-- arch/powerpc/include/asm/vdso_datapage.h | 117
-rw-r--r-- arch/powerpc/include/asm/vermagic.h | 20
-rw-r--r-- arch/powerpc/include/asm/vga.h | 60
-rw-r--r-- arch/powerpc/include/asm/vio.h | 170
-rw-r--r-- arch/powerpc/include/asm/vmalloc.h | 24
-rw-r--r-- arch/powerpc/include/asm/word-at-a-time.h | 206
-rw-r--r-- arch/powerpc/include/asm/xics.h | 178
-rw-r--r-- arch/powerpc/include/asm/xive-regs.h | 134
-rw-r--r-- arch/powerpc/include/asm/xive.h | 169
-rw-r--r-- arch/powerpc/include/asm/xmon.h | 31
-rw-r--r-- arch/powerpc/include/asm/xor.h | 47
-rw-r--r-- arch/powerpc/include/asm/xor_altivec.h | 22
-rw-r--r-- arch/powerpc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/powerpc/include/uapi/asm/auxvec.h | 55
-rw-r--r-- arch/powerpc/include/uapi/asm/bitsperlong.h | 13
-rw-r--r-- arch/powerpc/include/uapi/asm/bootx.h | 133
-rw-r--r-- arch/powerpc/include/uapi/asm/byteorder.h | 17
-rw-r--r-- arch/powerpc/include/uapi/asm/cputable.h | 63
-rw-r--r-- arch/powerpc/include/uapi/asm/eeh.h | 57
-rw-r--r-- arch/powerpc/include/uapi/asm/elf.h | 292
-rw-r--r-- arch/powerpc/include/uapi/asm/epapr_hcalls.h | 99
-rw-r--r-- arch/powerpc/include/uapi/asm/errno.h | 11
-rw-r--r-- arch/powerpc/include/uapi/asm/fcntl.h | 12
-rw-r--r-- arch/powerpc/include/uapi/asm/ioctl.h | 14
-rw-r--r-- arch/powerpc/include/uapi/asm/ioctls.h | 123
-rw-r--r-- arch/powerpc/include/uapi/asm/ipcbuf.h | 35
-rw-r--r-- arch/powerpc/include/uapi/asm/kvm.h | 736
-rw-r--r-- arch/powerpc/include/uapi/asm/kvm_para.h | 98
-rw-r--r-- arch/powerpc/include/uapi/asm/mman.h | 35
-rw-r--r-- arch/powerpc/include/uapi/asm/msgbuf.h | 36
-rw-r--r-- arch/powerpc/include/uapi/asm/nvram.h | 63
-rw-r--r-- arch/powerpc/include/uapi/asm/opal-prd.h | 59
-rw-r--r-- arch/powerpc/include/uapi/asm/papr_pdsm.h | 165
-rw-r--r-- arch/powerpc/include/uapi/asm/perf_event.h | 19
-rw-r--r-- arch/powerpc/include/uapi/asm/perf_regs.h | 95
-rw-r--r-- arch/powerpc/include/uapi/asm/posix_types.h | 21
-rw-r--r-- arch/powerpc/include/uapi/asm/ps3fb.h | 46
-rw-r--r-- arch/powerpc/include/uapi/asm/ptrace.h | 272
-rw-r--r-- arch/powerpc/include/uapi/asm/sembuf.h | 39
-rw-r--r-- arch/powerpc/include/uapi/asm/setup.h | 7
-rw-r--r-- arch/powerpc/include/uapi/asm/shmbuf.h | 60
-rw-r--r-- arch/powerpc/include/uapi/asm/sigcontext.h | 92
-rw-r--r-- arch/powerpc/include/uapi/asm/signal.h | 119
-rw-r--r-- arch/powerpc/include/uapi/asm/socket.h | 21
-rw-r--r-- arch/powerpc/include/uapi/asm/spu_info.h | 40
-rw-r--r-- arch/powerpc/include/uapi/asm/stat.h | 82
-rw-r--r-- arch/powerpc/include/uapi/asm/swab.h | 24
-rw-r--r-- arch/powerpc/include/uapi/asm/termbits.h | 157
-rw-r--r-- arch/powerpc/include/uapi/asm/termios.h | 77
-rw-r--r-- arch/powerpc/include/uapi/asm/tm.h | 21
-rw-r--r-- arch/powerpc/include/uapi/asm/types.h | 41
-rw-r--r-- arch/powerpc/include/uapi/asm/ucontext.h | 41
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 19
-rw-r--r-- arch/powerpc/include/uapi/asm/vas-api.h | 28
406 files changed, 62656 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
new file mode 100644
index 000000000..bdf0563ba
--- /dev/null
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -0,0 +1,564 @@
+/*
+ * MPC8xx Internal Memory Map
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * The I/O on the MPC860 is comprised of blocks of special registers
+ * and the dual port ram for the Communication Processor Module.
+ * Within this space are functional units such as the SIU, memory
+ * controller, system timers, and other control functions. It is
+ * a combination that I found difficult to separate into logical
+ * functional files.....but anyone else is welcome to try. -- Dan
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_8XX__
+#define __IMMAP_8XX__
+
+/* System configuration registers.
+*/
+typedef struct sys_conf {
+ uint sc_siumcr;
+ uint sc_sypcr;
+ uint sc_swt;
+ char res1[2];
+ ushort sc_swsr;
+ uint sc_sipend;
+ uint sc_simask;
+ uint sc_siel;
+ uint sc_sivec;
+ uint sc_tesr;
+ char res2[0xc];
+ uint sc_sdcr;
+ char res3[0x4c];
+} sysconf8xx_t;
+
+/* PCMCIA configuration registers.
+*/
+typedef struct pcmcia_conf {
+ uint pcmc_pbr0;
+ uint pcmc_por0;
+ uint pcmc_pbr1;
+ uint pcmc_por1;
+ uint pcmc_pbr2;
+ uint pcmc_por2;
+ uint pcmc_pbr3;
+ uint pcmc_por3;
+ uint pcmc_pbr4;
+ uint pcmc_por4;
+ uint pcmc_pbr5;
+ uint pcmc_por5;
+ uint pcmc_pbr6;
+ uint pcmc_por6;
+ uint pcmc_pbr7;
+ uint pcmc_por7;
+ char res1[0x20];
+ uint pcmc_pgcra;
+ uint pcmc_pgcrb;
+ uint pcmc_pscr;
+ char res2[4];
+ uint pcmc_pipr;
+ char res3[4];
+ uint pcmc_per;
+ char res4[4];
+} pcmconf8xx_t;
+
+/* Memory controller registers.
+*/
+typedef struct mem_ctlr {
+ uint memc_br0;
+ uint memc_or0;
+ uint memc_br1;
+ uint memc_or1;
+ uint memc_br2;
+ uint memc_or2;
+ uint memc_br3;
+ uint memc_or3;
+ uint memc_br4;
+ uint memc_or4;
+ uint memc_br5;
+ uint memc_or5;
+ uint memc_br6;
+ uint memc_or6;
+ uint memc_br7;
+ uint memc_or7;
+ char res1[0x24];
+ uint memc_mar;
+ uint memc_mcr;
+ char res2[4];
+ uint memc_mamr;
+ uint memc_mbmr;
+ ushort memc_mstat;
+ ushort memc_mptpr;
+ uint memc_mdr;
+ char res3[0x80];
+} memctl8xx_t;
+
+/*-----------------------------------------------------------------------
+ * BR - Memory Controller: Base Register 16-9
+ */
+#define BR_BA_MSK 0xffff8000 /* Base Address Mask */
+#define BR_AT_MSK 0x00007000 /* Address Type Mask */
+#define BR_PS_MSK 0x00000c00 /* Port Size Mask */
+#define BR_PS_32 0x00000000 /* 32 bit port size */
+#define BR_PS_16 0x00000800 /* 16 bit port size */
+#define BR_PS_8 0x00000400 /* 8 bit port size */
+#define BR_PARE 0x00000200 /* Parity Enable */
+#define BR_WP 0x00000100 /* Write Protect */
+#define BR_MS_MSK 0x000000c0 /* Machine Select Mask */
+#define BR_MS_GPCM 0x00000000 /* G.P.C.M. Machine Select */
+#define BR_MS_UPMA 0x00000080 /* U.P.M.A Machine Select */
+#define BR_MS_UPMB 0x000000c0 /* U.P.M.B Machine Select */
+#define BR_V 0x00000001 /* Bank Valid */
+
+/*-----------------------------------------------------------------------
+ * OR - Memory Controller: Option Register 16-11
+ */
+#define OR_AM_MSK 0xffff8000 /* Address Mask Mask */
+#define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */
+#define OR_CSNT_SAM 0x00000800 /* Chip Select Negation Time/ Start */
+ /* Address Multiplex */
+#define OR_ACS_MSK 0x00000600 /* Address to Chip Select Setup mask */
+#define OR_ACS_DIV1 0x00000000 /* CS is output at the same time */
+#define OR_ACS_DIV4 0x00000400 /* CS is output 1/4 a clock later */
+#define OR_ACS_DIV2 0x00000600 /* CS is output 1/2 a clock later */
+#define OR_G5LA 0x00000400 /* Output #GPL5 on #GPL_A5 */
+#define OR_G5LS 0x00000200 /* Drive #GPL high on falling edge of...*/
+#define OR_BI 0x00000100 /* Burst inhibit */
+#define OR_SCY_MSK 0x000000f0 /* Cycle Length in Clocks */
+#define OR_SCY_0_CLK 0x00000000 /* 0 clock cycles wait states */
+#define OR_SCY_1_CLK 0x00000010 /* 1 clock cycles wait states */
+#define OR_SCY_2_CLK 0x00000020 /* 2 clock cycles wait states */
+#define OR_SCY_3_CLK 0x00000030 /* 3 clock cycles wait states */
+#define OR_SCY_4_CLK 0x00000040 /* 4 clock cycles wait states */
+#define OR_SCY_5_CLK 0x00000050 /* 5 clock cycles wait states */
+#define OR_SCY_6_CLK 0x00000060 /* 6 clock cycles wait states */
+#define OR_SCY_7_CLK 0x00000070 /* 7 clock cycles wait states */
+#define OR_SCY_8_CLK 0x00000080 /* 8 clock cycles wait states */
+#define OR_SCY_9_CLK 0x00000090 /* 9 clock cycles wait states */
+#define OR_SCY_10_CLK 0x000000a0 /* 10 clock cycles wait states */
+#define OR_SCY_11_CLK 0x000000b0 /* 11 clock cycles wait states */
+#define OR_SCY_12_CLK 0x000000c0 /* 12 clock cycles wait states */
+#define OR_SCY_13_CLK 0x000000d0 /* 13 clock cycles wait states */
+#define OR_SCY_14_CLK 0x000000e0 /* 14 clock cycles wait states */
+#define OR_SCY_15_CLK 0x000000f0 /* 15 clock cycles wait states */
+#define OR_SETA 0x00000008 /* External Transfer Acknowledge */
+#define OR_TRLX 0x00000004 /* Timing Relaxed */
+#define OR_EHTR 0x00000002 /* Extended Hold Time on Read */
+
+/* System Integration Timers.
+*/
+typedef struct sys_int_timers {
+ ushort sit_tbscr;
+ char res0[0x02];
+ uint sit_tbreff0;
+ uint sit_tbreff1;
+ char res1[0x14];
+ ushort sit_rtcsc;
+ char res2[0x02];
+ uint sit_rtc;
+ uint sit_rtsec;
+ uint sit_rtcal;
+ char res3[0x10];
+ ushort sit_piscr;
+ char res4[2];
+ uint sit_pitc;
+ uint sit_pitr;
+ char res5[0x34];
+} sit8xx_t;
+
+#define TBSCR_TBIRQ_MASK ((ushort)0xff00)
+#define TBSCR_REFA ((ushort)0x0080)
+#define TBSCR_REFB ((ushort)0x0040)
+#define TBSCR_REFAE ((ushort)0x0008)
+#define TBSCR_REFBE ((ushort)0x0004)
+#define TBSCR_TBF ((ushort)0x0002)
+#define TBSCR_TBE ((ushort)0x0001)
+
+#define RTCSC_RTCIRQ_MASK ((ushort)0xff00)
+#define RTCSC_SEC ((ushort)0x0080)
+#define RTCSC_ALR ((ushort)0x0040)
+#define RTCSC_38K ((ushort)0x0010)
+#define RTCSC_SIE ((ushort)0x0008)
+#define RTCSC_ALE ((ushort)0x0004)
+#define RTCSC_RTF ((ushort)0x0002)
+#define RTCSC_RTE ((ushort)0x0001)
+
+#define PISCR_PIRQ_MASK ((ushort)0xff00)
+#define PISCR_PS ((ushort)0x0080)
+#define PISCR_PIE ((ushort)0x0004)
+#define PISCR_PTF ((ushort)0x0002)
+#define PISCR_PTE ((ushort)0x0001)
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+ uint car_sccr;
+ uint car_plprcr;
+ uint car_rsr;
+ char res[0x74]; /* Reserved area */
+} car8xx_t;
+
+/* System Integration Timers keys.
+*/
+typedef struct sitk {
+ uint sitk_tbscrk;
+ uint sitk_tbreff0k;
+ uint sitk_tbreff1k;
+ uint sitk_tbk;
+ char res1[0x10];
+ uint sitk_rtcsck;
+ uint sitk_rtck;
+ uint sitk_rtseck;
+ uint sitk_rtcalk;
+ char res2[0x10];
+ uint sitk_piscrk;
+ uint sitk_pitck;
+ char res3[0x38];
+} sitk8xx_t;
+
+/* Clocks and reset keys.
+*/
+typedef struct cark {
+ uint cark_sccrk;
+ uint cark_plprcrk;
+ uint cark_rsrk;
+ char res[0x474];
+} cark8xx_t;
+
+/* The key to unlock registers maintained by keep-alive power.
+*/
+#define KAPWR_KEY ((unsigned int)0x55ccaa33)
+
+/* Video interface. MPC823 Only.
+*/
+typedef struct vid823 {
+ ushort vid_vccr;
+ ushort res1;
+ u_char vid_vsr;
+ u_char res2;
+ u_char vid_vcmr;
+ u_char res3;
+ uint vid_vbcb;
+ uint res4;
+ uint vid_vfcr0;
+ uint vid_vfaa0;
+ uint vid_vfba0;
+ uint vid_vfcr1;
+ uint vid_vfaa1;
+ uint vid_vfba1;
+ u_char res5[0x18];
+} vid823_t;
+
+/* LCD interface. 823 Only.
+*/
+typedef struct lcd {
+ uint lcd_lccr;
+ uint lcd_lchcr;
+ uint lcd_lcvcr;
+ char res1[4];
+ uint lcd_lcfaa;
+ uint lcd_lcfba;
+ char lcd_lcsr;
+ char res2[0x7];
+} lcd823_t;
+
+/* I2C
+*/
+typedef struct i2c {
+ u_char i2c_i2mod;
+ char res1[3];
+ u_char i2c_i2add;
+ char res2[3];
+ u_char i2c_i2brg;
+ char res3[3];
+ u_char i2c_i2com;
+ char res4[3];
+ u_char i2c_i2cer;
+ char res5[3];
+ u_char i2c_i2cmr;
+ char res6[0x8b];
+} i2c8xx_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+ char res1[4];
+ uint sdma_sdar;
+ u_char sdma_sdsr;
+ char res3[3];
+ u_char sdma_sdmr;
+ char res4[3];
+ u_char sdma_idsr1;
+ char res5[3];
+ u_char sdma_idmr1;
+ char res6[3];
+ u_char sdma_idsr2;
+ char res7[3];
+ u_char sdma_idmr2;
+ char res8[0x13];
+} sdma8xx_t;
+
+/* Communication Processor Module Interrupt Controller.
+*/
+typedef struct cpm_ic {
+ ushort cpic_civr;
+ char res[0xe];
+ uint cpic_cicr;
+ uint cpic_cipr;
+ uint cpic_cimr;
+ uint cpic_cisr;
+} cpic8xx_t;
+
+/* Input/Output Port control/status registers.
+*/
+typedef struct io_port {
+ ushort iop_padir;
+ ushort iop_papar;
+ ushort iop_paodr;
+ ushort iop_padat;
+ char res1[8];
+ ushort iop_pcdir;
+ ushort iop_pcpar;
+ ushort iop_pcso;
+ ushort iop_pcdat;
+ ushort iop_pcint;
+ char res2[6];
+ ushort iop_pddir;
+ ushort iop_pdpar;
+ char res3[2];
+ ushort iop_pddat;
+ uint utmode;
+ char res4[4];
+} iop8xx_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+ ushort cpmt_tgcr;
+ char res1[0xe];
+ ushort cpmt_tmr1;
+ ushort cpmt_tmr2;
+ ushort cpmt_trr1;
+ ushort cpmt_trr2;
+ ushort cpmt_tcr1;
+ ushort cpmt_tcr2;
+ ushort cpmt_tcn1;
+ ushort cpmt_tcn2;
+ ushort cpmt_tmr3;
+ ushort cpmt_tmr4;
+ ushort cpmt_trr3;
+ ushort cpmt_trr4;
+ ushort cpmt_tcr3;
+ ushort cpmt_tcr4;
+ ushort cpmt_tcn3;
+ ushort cpmt_tcn4;
+ ushort cpmt_ter1;
+ ushort cpmt_ter2;
+ ushort cpmt_ter3;
+ ushort cpmt_ter4;
+ char res2[8];
+} cpmtimer8xx_t;
+
+/* Finally, the Communication Processor stuff.....
+*/
+typedef struct scc { /* Serial communication channels */
+ uint scc_gsmrl;
+ uint scc_gsmrh;
+ ushort scc_psmr;
+ char res1[2];
+ ushort scc_todr;
+ ushort scc_dsr;
+ ushort scc_scce;
+ char res2[2];
+ ushort scc_sccm;
+ char res3;
+ u_char scc_sccs;
+ char res4[8];
+} scc_t;
+
+typedef struct smc { /* Serial management channels */
+ char res1[2];
+ ushort smc_smcmr;
+ char res2[2];
+ u_char smc_smce;
+ char res3[3];
+ u_char smc_smcm;
+ char res4[5];
+} smc_t;
+
+/* MPC860T Fast Ethernet Controller. It isn't part of the CPM, but
+ * it fits within the address space.
+ */
+
+typedef struct fec {
+ uint fec_addr_low; /* lower 32 bits of station address */
+ ushort fec_addr_high; /* upper 16 bits of station address */
+ ushort res1; /* reserved */
+ uint fec_grp_hash_table_high; /* upper 32-bits of hash table */
+ uint fec_grp_hash_table_low; /* lower 32-bits of hash table */
+ uint fec_r_des_start; /* beginning of Rx descriptor ring */
+ uint fec_x_des_start; /* beginning of Tx descriptor ring */
+ uint fec_r_buff_size; /* Rx buffer size */
+ uint res2[9]; /* reserved */
+ uint fec_ecntrl; /* ethernet control register */
+ uint fec_ievent; /* interrupt event register */
+ uint fec_imask; /* interrupt mask register */
+ uint fec_ivec; /* interrupt level and vector status */
+ uint fec_r_des_active; /* Rx ring updated flag */
+ uint fec_x_des_active; /* Tx ring updated flag */
+ uint res3[10]; /* reserved */
+ uint fec_mii_data; /* MII data register */
+ uint fec_mii_speed; /* MII speed control register */
+ uint res4[17]; /* reserved */
+ uint fec_r_bound; /* end of RAM (read-only) */
+ uint fec_r_fstart; /* Rx FIFO start address */
+ uint res5[6]; /* reserved */
+ uint fec_x_fstart; /* Tx FIFO start address */
+ uint res6[17]; /* reserved */
+ uint fec_fun_code; /* fec SDMA function code */
+ uint res7[3]; /* reserved */
+ uint fec_r_cntrl; /* Rx control register */
+ uint fec_r_hash; /* Rx hash register */
+ uint res8[14]; /* reserved */
+ uint fec_x_cntrl; /* Tx control register */
+ uint res9[0x1e]; /* reserved */
+} fec_t;
+
+/* The FEC and LCD color map share the same address space....
+ * I guess we will never see an 823T :-).
+ */
+union fec_lcd {
+ fec_t fl_un_fec;
+ u_char fl_un_cmap[0x200];
+};
+
+typedef struct comm_proc {
+ /* General control and status registers.
+ */
+ ushort cp_cpcr;
+ u_char res1[2];
+ ushort cp_rccr;
+ u_char res2;
+ u_char cp_rmds;
+ u_char res3[4];
+ ushort cp_cpmcr1;
+ ushort cp_cpmcr2;
+ ushort cp_cpmcr3;
+ ushort cp_cpmcr4;
+ u_char res4[2];
+ ushort cp_rter;
+ u_char res5[2];
+ ushort cp_rtmr;
+ u_char res6[0x14];
+
+ /* Baud rate generators.
+ */
+ uint cp_brgc1;
+ uint cp_brgc2;
+ uint cp_brgc3;
+ uint cp_brgc4;
+
+ /* Serial Communication Channels.
+ */
+ scc_t cp_scc[4];
+
+ /* Serial Management Channels.
+ */
+ smc_t cp_smc[2];
+
+ /* Serial Peripheral Interface.
+ */
+ ushort cp_spmode;
+ u_char res7[4];
+ u_char cp_spie;
+ u_char res8[3];
+ u_char cp_spim;
+ u_char res9[2];
+ u_char cp_spcom;
+ u_char res10[2];
+
+ /* Parallel Interface Port.
+ */
+ u_char res11[2];
+ ushort cp_pipc;
+ u_char res12[2];
+ ushort cp_ptpr;
+ uint cp_pbdir;
+ uint cp_pbpar;
+ u_char res13[2];
+ ushort cp_pbodr;
+ uint cp_pbdat;
+
+ /* Port E - MPC87x/88x only.
+ */
+ uint cp_pedir;
+ uint cp_pepar;
+ uint cp_peso;
+ uint cp_peodr;
+ uint cp_pedat;
+
+ /* Communications Processor Timing Register -
+ Contains RMII Timing for the FECs on MPC87x/88x only.
+ */
+ uint cp_cptr;
+
+ /* Serial Interface and Time Slot Assignment.
+ */
+ uint cp_simode;
+ u_char cp_sigmr;
+ u_char res15;
+ u_char cp_sistr;
+ u_char cp_sicmr;
+ u_char res16[4];
+ uint cp_sicr;
+ uint cp_sirp;
+ u_char res17[0xc];
+
+ /* 256 bytes of MPC823 video controller RAM array.
+ */
+ u_char cp_vcram[0x100];
+ u_char cp_siram[0x200];
+
+ /* The fast ethernet controller is not really part of the CPM,
+ * but it resides in the address space.
+ * The LCD color map is also here.
+ */
+ union fec_lcd fl_un;
+#define cp_fec fl_un.fl_un_fec
+#define lcd_cmap fl_un.fl_un_cmap
+ char res18[0xE00];
+
+ /* The DUET family has a second FEC here */
+ fec_t cp_fec2;
+#define cp_fec1 cp_fec /* consistency macro */
+
+ /* Dual Ported RAM follows.
+ * There are many different formats for this memory area
+ * depending upon the devices used and options chosen.
+ * Some processors don't have all of it populated.
+ */
+ u_char cp_dpmem[0x1C00]; /* BD / Data / ucode */
+ u_char cp_dparam[0x400]; /* Parameter RAM */
+} cpm8xx_t;
+
+/* Internal memory map.
+*/
+typedef struct immap {
+ sysconf8xx_t im_siu_conf; /* SIU Configuration */
+ pcmconf8xx_t im_pcmcia; /* PCMCIA Configuration */
+ memctl8xx_t im_memctl; /* Memory Controller */
+ sit8xx_t im_sit; /* System integration timers */
+ car8xx_t im_clkrst; /* Clocks and reset */
+ sitk8xx_t im_sitk; /* Sys int timer keys */
+ cark8xx_t im_clkrstk; /* Clocks and reset keys */
+ vid823_t im_vid; /* Video (823 only) */
+ lcd823_t im_lcd; /* LCD (823 only) */
+ i2c8xx_t im_i2c; /* I2C control/status */
+ sdma8xx_t im_sdma; /* SDMA control/status */
+ cpic8xx_t im_cpic; /* CPM Interrupt Controller */
+ iop8xx_t im_ioport; /* IO Port control/status */
+ cpmtimer8xx_t im_cpmtimer; /* CPM timers */
+ cpm8xx_t im_cpm; /* Communication processor */
+} immap_t;
+
+#endif /* __IMMAP_8XX__ */
+#endif /* __KERNEL__ */
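Usage sketch (illustrative, not part of this patch): on MPC8xx parts the immap_t layout above is overlaid on the region addressed by the IMMR register, so driver code typically ioremap()s that base and dereferences the struct fields. The helper below is hypothetical; it assumes an 8xx platform where get_immrbase() (from <sysdev/fsl_soc.h>) returns the physical IMMR base.

#include <linux/io.h>
#include <sysdev/fsl_soc.h>
#include <asm/8xx_immap.h>

/* Read memory controller base register 0 (BR0) through the internal map. */
static u32 example_read_br0(void)
{
	immap_t __iomem *immap;
	u32 br0;

	immap = ioremap(get_immrbase(), sizeof(immap_t));
	if (!immap)
		return 0;

	br0 = in_be32(&immap->im_memctl.memc_br0);
	iounmap(immap);
	return br0;
}

The BR_* and OR_* masks defined above then decode the value, e.g. (br0 & BR_PS_MSK) == BR_PS_32 tests for a 32-bit port size.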
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
new file mode 100644
index 000000000..bcf95ce09
--- /dev/null
+++ b/arch/powerpc/include/asm/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += syscall_table_32.h
+generated-y += syscall_table_64.h
+generated-y += syscall_table_spu.h
+generic-y += export.h
+generic-y += kvm_types.h
+generic-y += mcs_spinlock.h
+generic-y += qrwlock.h
+generic-y += vtime.h
+generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
new file mode 100644
index 000000000..6d79c3170
--- /dev/null
+++ b/arch/powerpc/include/asm/accounting.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Common time accounting prototypes and such for all ppc machines.
+ */
+
+#ifndef __POWERPC_ACCOUNTING_H
+#define __POWERPC_ACCOUNTING_H
+
+/* Stuff for accurate time accounting */
+struct cpu_accounting_data {
+ /* Accumulated cputime values to flush on ticks */
+ unsigned long utime;
+ unsigned long stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ unsigned long utime_scaled;
+ unsigned long stime_scaled;
+#endif
+ unsigned long gtime;
+ unsigned long hardirq_time;
+ unsigned long softirq_time;
+ unsigned long steal_time;
+ unsigned long idle_time;
+ /* Internal counters */
+ unsigned long starttime; /* TB value snapshot */
+ unsigned long starttime_user; /* TB value on exit to usermode */
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ unsigned long startspurr; /* SPURR value snapshot */
+ unsigned long utime_sspurr; /* ->user_time when ->startspurr set */
+#endif
+};
+
+#endif
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
new file mode 100644
index 000000000..6b6485c98
--- /dev/null
+++ b/arch/powerpc/include/asm/agp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_AGP_H
+#define _ASM_POWERPC_AGP_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#define map_page_into_agp(page) do {} while (0)
+#define unmap_page_from_agp(page) do {} while (0)
+#define flush_agp_cache() mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_AGP_H */
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 000000000..51b093f67
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);
+
+#ifdef CONFIG_PPC_POWERNV
+int pnv_get_random_long(unsigned long *v);
+#endif
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
new file mode 100644
index 000000000..2bc53c646
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_ASM_COMPAT_H
+#define _ASM_POWERPC_ASM_COMPAT_H
+
+#include <asm/asm-const.h>
+#include <asm/types.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef __powerpc64__
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(ld)
+#define PPC_STL stringify_in_c(std)
+#define PPC_STLU stringify_in_c(stdu)
+#define PPC_LCMPI stringify_in_c(cmpdi)
+#define PPC_LCMPLI stringify_in_c(cmpldi)
+#define PPC_LCMP stringify_in_c(cmpd)
+#define PPC_LONG stringify_in_c(.8byte)
+#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
+#define PPC_TLNEI stringify_in_c(tdnei)
+#define PPC_LLARX stringify_in_c(ldarx)
+#define PPC_STLCX stringify_in_c(stdcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzd)
+#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
+#define PPC_SRL stringify_in_c(srd)
+#define PPC_LR_STKOFF 16
+#define PPC_MIN_STKFRM 112
+
+#ifdef __BIG_ENDIAN__
+#define LHZX_BE stringify_in_c(lhzx)
+#define LWZX_BE stringify_in_c(lwzx)
+#define LDX_BE stringify_in_c(ldx)
+#define STWX_BE stringify_in_c(stwx)
+#define STDX_BE stringify_in_c(stdx)
+#else
+#define LHZX_BE stringify_in_c(lhbrx)
+#define LWZX_BE stringify_in_c(lwbrx)
+#define LDX_BE stringify_in_c(ldbrx)
+#define STWX_BE stringify_in_c(stwbrx)
+#define STDX_BE stringify_in_c(stdbrx)
+#endif
+
+#else /* 32-bit */
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(lwz)
+#define PPC_STL stringify_in_c(stw)
+#define PPC_STLU stringify_in_c(stwu)
+#define PPC_LCMPI stringify_in_c(cmpwi)
+#define PPC_LCMPLI stringify_in_c(cmplwi)
+#define PPC_LCMP stringify_in_c(cmpw)
+#define PPC_LONG stringify_in_c(.long)
+#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
+#define PPC_TLNEI stringify_in_c(twnei)
+#define PPC_LLARX stringify_in_c(lwarx)
+#define PPC_STLCX stringify_in_c(stwcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzw)
+#define PPC_MTOCRF stringify_in_c(mtcrf)
+#define PPC_SRL stringify_in_c(srw)
+#define PPC_LR_STKOFF 4
+#define PPC_MIN_STKFRM 16
+
+#endif
+
+#endif /* _ASM_POWERPC_ASM_COMPAT_H */
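A minimal sketch of how these macros are meant to be used (assumed example, not from the patch): because the mnemonics are quoted through stringify_in_c(), PPC_LL/PPC_STL splice directly into inline-asm strings and expand to lwz/stw on 32-bit and ld/std on 64-bit, so a single sequence covers both word sizes.

#include <asm/asm-compat.h>

/* Load a long-sized value with the width-appropriate instruction. */
static inline unsigned long example_load_long(const unsigned long *p)
{
	unsigned long v;

	asm volatile(PPC_LL " %0,0(%1)" : "=r" (v) : "b" (p) : "memory");
	return v;
}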
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
new file mode 100644
index 000000000..bfb3c3534
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_ASM_CONST_H
+#define _ASM_POWERPC_ASM_CONST_H
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+#endif /* _ASM_POWERPC_ASM_CONST_H */
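Hedged illustration of the dual-context trick above: ASM_CONST() appends the UL suffix only when compiled as C, so one definition serves both C expressions and assembly sources, where a UL suffix would be rejected. EXAMPLE_BASE is a made-up name.

#include <asm/asm-const.h>

#define EXAMPLE_BASE	ASM_CONST(0xc000000000000000)	/* ...UL in C, bare in .S */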
diff --git a/arch/powerpc/include/asm/asm-offsets.h b/arch/powerpc/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..274bce76f
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
+#define _ASM_POWERPC_ASM_PROTOTYPES_H
+/*
+ * This file is for C prototypes of asm symbols that are EXPORTed.
+ * It allows the modversions logic to see their prototype and
+ * generate proper CRCs for them.
+ *
+ * Copyright 2016, Daniel Axtens, IBM Corporation.
+ */
+
+#include <linux/threads.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <linux/uaccess.h>
+#include <asm/epapr_hcalls.h>
+#include <asm/dcr.h>
+#include <asm/mmu_context.h>
+#include <asm/ultravisor-api.h>
+
+#include <uapi/asm/ucontext.h>
+
+/* Ultravisor */
+#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
+long ucall_norets(unsigned long opcode, ...);
+#else
+static inline long ucall_norets(unsigned long opcode, ...)
+{
+ return U_NOT_AVAILABLE;
+}
+#endif
+
+/* OPAL */
+int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
+ int64_t a4, int64_t a5, int64_t a6, int64_t a7,
+ int64_t opcode, uint64_t msr);
+
+/* misc runtime */
+void enable_machine_check(void);
+extern u64 __bswapdi2(u64);
+extern s64 __lshrdi3(s64, int);
+extern s64 __ashldi3(s64, int);
+extern s64 __ashrdi3(s64, int);
+extern int __cmpdi2(s64, s64);
+extern int __ucmpdi2(u64, u64);
+
+/* tracing */
+void _mcount(void);
+
+/* Transaction memory related */
+void tm_enable(void);
+void tm_disable(void);
+void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+#else
+static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
+
+long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
+long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
+ unsigned long dabrx);
+
+#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/async_tx.h b/arch/powerpc/include/asm/async_tx.h
new file mode 100644
index 000000000..a14758426
--- /dev/null
+++ b/arch/powerpc/include/asm/async_tx.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2008-2009 DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ */
+#ifndef _ASM_POWERPC_ASYNC_TX_H_
+#define _ASM_POWERPC_ASYNC_TX_H_
+
+#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
+extern struct dma_chan *
+ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
+ struct page **dst_lst, int dst_cnt, struct page **src_lst,
+ int src_cnt, size_t src_sz);
+
+#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
+ src_cnt, src_sz) \
+ ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
+ src_cnt, src_sz)
+#else
+
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+ __async_tx_find_channel(dep, type)
+
+struct dma_chan *
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type);
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
new file mode 100644
index 000000000..486ab7889
--- /dev/null
+++ b/arch/powerpc/include/asm/atomic.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/asm-const.h>
+
+/*
+ * Because *_return_relaxed and {cmp}xchg_relaxed are implemented with
+ * a "bne-" instruction at the end, an isync is enough as an acquire
+ * barrier on platforms without lwsync.
+ */
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
+
+static __inline__ int arch_atomic_read(const atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+
+ return t;
+}
+
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+}
+
+#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
+{ \
+ int t; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%3 # atomic_" #op "\n" \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
+" stwcx. %0,0,%3 \n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
+} \
+
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
+{ \
+ int t; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
+" stwcx. %0,0,%3\n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
+ \
+ return t; \
+}
+
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+{ \
+ int res, t; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
+ #asm_op "%I3" suffix " %1,%0,%3\n" \
+" stwcx. %1,0,%4\n" \
+" bne- 1b\n" \
+ : "=&r" (res), "=&r" (t), "+m" (v->counter) \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
+ \
+ return res; \
+}
+
+#define ATOMIC_OPS(op, asm_op, suffix, sign, ...) \
+ ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__) \
+ ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
+
+ATOMIC_OPS(add, add, "c", I, "xer")
+ATOMIC_OPS(sub, sub, "c", I, "xer")
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op, suffix, sign) \
+ ATOMIC_OP(op, asm_op, suffix, sign) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
+
+ATOMIC_OPS(and, and, ".", K)
+ATOMIC_OPS(or, or, "", K)
+ATOMIC_OPS(xor, xor, "", K)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP_RELAXED
+#undef ATOMIC_OP_RETURN_RELAXED
+#undef ATOMIC_OP
+
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
+
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
+
+/*
+ * Don't want to override the generic atomic_try_cmpxchg_acquire, because
+ * we add a lock hint to the lwarx, which may not be wanted for the
+ * _acquire case (and is not used by the other _acquire variants so it
+ * would be a surprise).
+ */
+static __always_inline bool
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2,%[eh] # atomic_try_cmpxchg_acquire \n"
+" cmpw 0,%0,%3 \n"
+" bne- 2f \n"
+" stwcx. %4,0,%2 \n"
+" bne- 1b \n"
+"\t" PPC_ACQUIRE_BARRIER " \n"
+"2: \n"
+ : "=&r" (r), "+m" (v->counter)
+ : "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+
+/**
+ * atomic_fetch_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int t;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq 2f \n\
+ add%I2c %0,%0,%2 \n"
+" stwcx. %0,0,%1 \n\
+ bne- 1b \n"
+ PPC_ATOMIC_EXIT_BARRIER
+" sub%I2c %0,%0,%2 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "rI" (a), "r" (u)
+ : "cc", "memory", "xer");
+
+ return t;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
+ blt- 2f\n"
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:" : "=&b" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+
+#ifdef __powerpc64__
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+
+ return t;
+}
+
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
+{
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+}
+
+#define ATOMIC64_OP(op, asm_op) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
+{ \
+ s64 t; \
+ \
+ __asm__ __volatile__( \
+"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
+ #asm_op " %0,%2,%0\n" \
+" stdcx. %0,0,%3 \n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+}
+
+#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+static inline s64 \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 t; \
+ \
+ __asm__ __volatile__( \
+"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
+ #asm_op " %0,%2,%0\n" \
+" stdcx. %0,0,%3\n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ \
+ return t; \
+}
+
+#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
+static inline s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 res, t; \
+ \
+ __asm__ __volatile__( \
+"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
+ #asm_op " %1,%3,%0\n" \
+" stdcx. %1,0,%4\n" \
+" bne- 1b\n" \
+ : "=&r" (res), "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ \
+ return res; \
+}
+
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+ ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(or, or)
+ATOMIC64_OPS(xor, xor)
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP_RELAXED
+#undef ATOMIC64_OP_RETURN_RELAXED
+#undef ATOMIC64_OP
+
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_inc\n\
+ addic %0,%0,1\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc", "xer");
+}
+#define arch_atomic64_inc arch_atomic64_inc
+
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
+" addic %0,%0,1\n"
+" stdcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc", "xer");
+
+ return t;
+}
+
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_dec\n\
+ addic %0,%0,-1\n\
+ stdcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc", "xer");
+}
+#define arch_atomic64_dec arch_atomic64_dec
+
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
+" addic %0,%0,-1\n"
+" stdcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc", "xer");
+
+ return t;
+}
+
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 t;
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stdcx. %0,0,%1\n\
+ bne- 1b"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+ return t;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
+
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
+
+/**
+ * atomic64_fetch_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 t;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
+ cmpd 0,%0,%3 \n\
+ beq 2f \n\
+ add %0,%2,%0 \n"
+" stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ PPC_ATOMIC_EXIT_BARRIER
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
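+/*
+ * Illustrative use (editor's sketch; 'obj->refs' is a hypothetical
+ * atomic64_t field): the classic "take a reference unless the object
+ * is already dead" pattern, i.e. add 1 unless the counter is 0.
+ *
+ *	if (arch_atomic64_fetch_add_unless(&obj->refs, 1, 0) != 0) {
+ *		// old value was non-zero, reference taken
+ *	}
+ */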
+
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ s64 t1, t2;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
+ cmpdi 0,%0,0\n\
+ beq- 2f\n\
+ addic %1,%0,1\n\
+ stdcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (t1), "=&r" (t2)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+ return t1 != 0;
+}
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
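+/*
+ * Illustrative use (editor's sketch, same hypothetical 'obj->refs' as
+ * above): the boolean flavour of the pattern.
+ *
+ *	if (arch_atomic64_inc_not_zero(&obj->refs))
+ *		// reference taken, the object cannot go away under us
+ */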
+
+#endif /* __powerpc64__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h
new file mode 100644
index 000000000..1b5eab62e
--- /dev/null
+++ b/arch/powerpc/include/asm/backlight.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Routines for handling backlight control on PowerBooks
+ *
+ * For now, the implementation resides in
+ * arch/powerpc/platforms/powermac/backlight.c
+ *
+ */
+#ifndef __ASM_POWERPC_BACKLIGHT_H
+#define __ASM_POWERPC_BACKLIGHT_H
+#ifdef __KERNEL__
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+
+/* For locking instructions, see the implementation file */
+extern struct backlight_device *pmac_backlight;
+extern struct mutex pmac_backlight_mutex;
+
+extern int pmac_backlight_curve_lookup(struct fb_info *info, int value);
+
+extern int pmac_has_backlight_type(const char *type);
+
+extern void pmac_backlight_key(int direction);
+static inline void pmac_backlight_key_up(void)
+{
+ pmac_backlight_key(0);
+}
+static inline void pmac_backlight_key_down(void)
+{
+ pmac_backlight_key(1);
+}
+
+extern void pmac_backlight_set_legacy_brightness_pmu(int brightness);
+extern int pmac_backlight_set_legacy_brightness(int brightness);
+extern int pmac_backlight_get_legacy_brightness(void);
+
+extern void pmac_backlight_enable(void);
+extern void pmac_backlight_disable(void);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 000000000..e80b2c0e9
--- /dev/null
+++ b/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/ppc-opcode.h>
+#endif
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+
+/* The sub-arch has lwsync */
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC)
+# define SMPWMB LWSYNC
+#elif defined(CONFIG_BOOKE)
+# define SMPWMB mbar
+#else
+# define SMPWMB eieio
+#endif
+
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
+#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define dma_rmb() __lwsync()
+#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+
+#define __smp_lwsync() __lwsync()
+
+#define __smp_mb() mb()
+#define __smp_rmb() __lwsync()
+#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
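+/*
+ * Illustrative use (editor's sketch; 'desc' and 'table' are
+ * hypothetical): keep a dependent access from starting before the
+ * index load has resolved.
+ *
+ *	idx = READ_ONCE(desc->index);
+ *	data_barrier(idx);
+ *	val = table[idx];	// cannot start until idx is known
+ */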
+
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __smp_lwsync(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __smp_lwsync(); \
+ ___p1; \
+})
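+/*
+ * Illustrative pairing (editor's sketch; 'data' and 'flag' are
+ * hypothetical shared variables): the release orders the data store
+ * before the flag store, and the acquire orders the flag load before
+ * the data load, so a consumer that observes flag == 1 must also
+ * observe data == 42.
+ *
+ *	// producer				// consumer
+ *	data = 42;				if (smp_load_acquire(&flag))
+ *	smp_store_release(&flag, 1);			r = data;
+ */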
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT nop
+#elif defined(CONFIG_PPC_E500)
+#define NOSPEC_BARRIER_SLOT nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+/*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
+
+// This also acts as a compiler barrier due to the memory clobber.
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
+#define barrier_nospec_asm
+#define barrier_nospec()
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+/*
+ * pmem_wmb() ensures that all stores whose modifications are written
+ * to persistent storage by preceding dcbfps/dcbstps instructions have
+ * updated persistent storage before any data access or data transfer
+ * caused by subsequent instructions is initiated.
+ */
+#define pmem_wmb() __asm__ __volatile__(PPC_PHWSYNC ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
new file mode 100644
index 000000000..7e0f03229
--- /dev/null
+++ b/arch/powerpc/include/asm/bitops.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on a similar bit array layout, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
+
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
+ ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
+#define PPC_BITLSHIFT32(be) (32 - 1 - (be))
+#define PPC_BIT32(bit) (1UL << PPC_BITLSHIFT32(bit))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+
+#define PPC_BITLSHIFT8(be) (8 - 1 - (be))
+#define PPC_BIT8(bit) (1UL << PPC_BITLSHIFT8(bit))
+#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
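+/*
+ * Worked examples (editor's addition; the values follow from the macros
+ * above, assuming a 64-bit long). IBM bit 0 is the MSB:
+ *
+ *	PPC_BIT(0)        == 0x8000000000000000UL
+ *	PPC_BIT(63)       == 0x0000000000000001UL
+ *	PPC_BITMASK(0, 3) == 0xf000000000000000UL	// IBM bits 0..3
+ *	PPC_BITEXTRACT(0x8000000000000000UL, 0, 0) == 1
+ */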
+
+#include <asm/barrier.h>
+
+/* Macro for generating the ***_bits() functions */
+#define DEFINE_BITOP(fn, op, prefix) \
+static inline void fn(unsigned long mask, \
+ volatile unsigned long *_p) \
+{ \
+ unsigned long old; \
+ unsigned long *p = (unsigned long *)_p; \
+ __asm__ __volatile__ ( \
+ prefix \
+"1:" PPC_LLARX "%0,0,%3,0\n" \
+ #op "%I2 %0,%0,%2\n" \
+ PPC_STLCX "%0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "rK" (mask), "r" (p) \
+ : "cc", "memory"); \
+}
+
+DEFINE_BITOP(set_bits, or, "")
+DEFINE_BITOP(change_bits, xor, "")
+
+static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
+{
+ if (!x)
+ return false;
+ if (x & 1)
+ x = ~x; // make the mask non-wrapping
+ x += x & -x; // adding the low set bit results in at most one bit set
+
+ return !(x & (x - 1));
+}
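+/*
+ * Examples (editor's addition): a mask is valid for rlwinm if its set
+ * bits form a single contiguous run, possibly wrapping around bit 0
+ * (unsigned long is 32-bit wherever this check is used):
+ *
+ *	is_rlwinm_mask_valid(0x00ffff00)	// true: one run
+ *	is_rlwinm_mask_valid(0xff0000ff)	// true: wrapping run
+ *	is_rlwinm_mask_valid(0x00ff00ff)	// false: two runs
+ */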
+
+#define DEFINE_CLROP(fn, prefix) \
+static inline void fn(unsigned long mask, volatile unsigned long *_p) \
+{ \
+ unsigned long old; \
+ unsigned long *p = (unsigned long *)_p; \
+ \
+ if (IS_ENABLED(CONFIG_PPC32) && \
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
+ asm volatile ( \
+ prefix \
+ "1:" "lwarx %0,0,%3\n" \
+ "rlwinm %0,%0,0,%2\n" \
+ "stwcx. %0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "n" (~mask), "r" (p) \
+ : "cc", "memory"); \
+ } else { \
+ asm volatile ( \
+ prefix \
+ "1:" PPC_LLARX "%0,0,%3,0\n" \
+ "andc %0,%0,%2\n" \
+ PPC_STLCX "%0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory"); \
+ } \
+}
+
+DEFINE_CLROP(clear_bits, "")
+DEFINE_CLROP(clear_bits_unlock, PPC_RELEASE_BARRIER)
+
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+{
+ set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+{
+ clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+{
+ change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
+ * operands. */
+#define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \
+static inline unsigned long fn( \
+ unsigned long mask, \
+ volatile unsigned long *_p) \
+{ \
+ unsigned long old, t; \
+ unsigned long *p = (unsigned long *)_p; \
+ __asm__ __volatile__ ( \
+ prefix \
+"1:" PPC_LLARX "%0,0,%3,%4\n" \
+ #op "%I2 %1,%0,%2\n" \
+ PPC_STLCX "%1,0,%3\n" \
+ "bne- 1b\n" \
+ postfix \
+ : "=&r" (old), "=&r" (t) \
+ : "rK" (mask), "r" (p), "n" (eh) \
+ : "cc", "memory"); \
+ return (old & mask); \
+}
+
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits_lock, or, "",
+ PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
+
+static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
+{
+ unsigned long old, t;
+ unsigned long *p = (unsigned long *)_p;
+
+ if (IS_ENABLED(CONFIG_PPC32) &&
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" "lwarx %0,0,%3\n"
+ "rlwinm %1,%0,0,%2\n"
+ "stwcx. %1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "n" (~mask), "r" (p)
+ : "cc", "memory");
+ } else {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" PPC_LLARX "%0,0,%3,0\n"
+ "andc %1,%0,%2\n"
+ PPC_STLCX "%1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+ }
+
+ return (old & mask);
+}
+
+static inline int arch_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+static inline int arch_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_set_bits_lock(BIT_MASK(nr),
+ addr + BIT_WORD(nr)) != 0;
+}
+
+static inline int arch_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+static inline int arch_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+#ifdef CONFIG_PPC64
+static inline unsigned long
+clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+
+ __asm__ __volatile__ (
+ PPC_RELEASE_BARRIER
+"1:" PPC_LLARX "%0,0,%3,0\n"
+ "andc %1,%0,%2\n"
+ PPC_STLCX "%1,0,%3\n"
+ "bne- 1b\n"
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return old;
+}
+
+/*
+ * This is a special function for mm/filemap.c
+ * Bit 7 corresponds to PG_waiters.
+ */
+#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \
+ (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
+
+#endif /* CONFIG_PPC64 */
+
+#include <asm-generic/bitops/non-atomic.h>
+
+static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
+ __clear_bit(nr, addr);
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in a double word.
+ */
+#define __ilog2(x) ilog2(x)
+
+#include <asm-generic/bitops/ffz.h>
+
+#include <asm-generic/bitops/builtin-__ffs.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __always_inline int fls(unsigned int x)
+{
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 32 - __builtin_clz(x) : 0;
+ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
+}
+
+#include <asm-generic/bitops/builtin-__fls.h>
+
+/*
+ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
+ * instruction; for 32-bit we use the generic version, which does two
+ * 32-bit fls calls.
+ */
+#ifdef CONFIG_PPC64
+static __always_inline int fls64(__u64 x)
+{
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 64 - __builtin_clzll(x) : 0;
+ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 64 - lz;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
+
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
+#include <asm-generic/bitops/hweight.h>
+#endif
+
+/* wrappers that deal with KASAN instrumentation */
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
+/* Little-endian versions */
+#include <asm-generic/bitops/le.h>
+
+/* Bitmap functions for the ext2 filesystem */
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
new file mode 100644
index 000000000..678f9c9d8
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
+#define _ASM_POWERPC_BOOK3S_32_KUP_H
+
+#include <asm/bug.h>
+#include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/jump_label.h>
+
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP);
+}
+
+#ifdef CONFIG_PPC_KUAP
+
+#include <linux/sched.h>
+
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void kuap_lock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) | SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock_addr(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ regs->kuap = kuap;
+ if (unlikely(kuap == KUAP_NONE))
+ return;
+
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, false);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (unlikely(kuap != KUAP_NONE)) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, false);
+ }
+
+ if (likely(regs->kuap == KUAP_NONE))
+ return;
+
+ current->thread.kuap = regs->kuap;
+
+ kuap_unlock(regs->kuap, false);
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
+
+ return kuap;
+}
+
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (!(dir & KUAP_WRITE))
+ return;
+
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
+}
+
+static __always_inline void __prevent_user_access(unsigned long dir)
+{
+ u32 kuap = current->thread.kuap;
+
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (!(dir & KUAP_WRITE))
+ return;
+
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, true);
+}
+
+static inline unsigned long __prevent_user_access_return(void)
+{
+ unsigned long flags = current->thread.kuap;
+
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(flags, true);
+ }
+
+ return flags;
+}
+
+static inline void __restore_user_access(unsigned long flags)
+{
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
+}
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ unsigned long kuap = regs->kuap;
+
+ if (!is_write || kuap == KUAP_ALL)
+ return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
+
+ return false;
+}
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
new file mode 100644
index 000000000..78c6a5fde
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+
+/*
+ * 32-bit hash table MMU support
+ */
+
+/*
+ * BATs
+ */
+
+/* Block size masks */
+#define BL_128K 0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M 0x007
+#define BL_2M 0x00F
+#define BL_4M 0x01F
+#define BL_8M 0x03F
+#define BL_16M 0x07F
+#define BL_32M 0x0FF
+#define BL_64M 0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
+
+/* BAT Access Protection */
+#define BPP_XX 0x00 /* No access */
+#define BPP_RX 0x01 /* Read only */
+#define BPP_RW 0x02 /* Read/write */
+
+#ifndef __ASSEMBLY__
+/* Contort a phys_addr_t into the right format/bits for a BAT */
+#ifdef CONFIG_PHYS_64BIT
+#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
+ ((x & 0x0000000e00000000ULL) >> 24) | \
+ ((x & 0x0000000100000000ULL) >> 30)))
+#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
+ (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
+ (((u64)(x) << 30) & 0x0000000100000000ULL))
+#else
+#define BAT_PHYS_ADDR(x) (x)
+#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
+#endif
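+/*
+ * Worked example (editor's addition): with CONFIG_PHYS_64BIT, physical
+ * address bits 32-35 are folded into low bits of the BAT value, so a
+ * 4GB physical address round-trips as:
+ *
+ *	BAT_PHYS_ADDR(0x100000000ULL) == 0x4
+ *	PHYS_BAT_ADDR(0x4)            == 0x100000000ULL
+ */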
+
+struct ppc_bat {
+ u32 batu;
+ u32 batl;
+};
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Hash table
+ */
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+
+/* Values for Segment Registers */
+#define SR_NX 0x10000000 /* No Execute */
+#define SR_KP 0x20000000 /* User key */
+#define SR_KS 0x40000000 /* Supervisor key */
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+
+.macro uus_addi sr reg1 reg2 imm
+ .if NUM_USER_SEGMENTS > \sr
+ addi \reg1,\reg2,\imm
+ .endif
+.endm
+
+.macro uus_mtsr sr reg1
+ .if NUM_USER_SEGMENTS > \sr
+ mtsr \sr, \reg1
+ .endif
+.endm
+
+/*
+ * This isync() shouldn't be necessary, as the kernel is not expected to run
+ * any instruction in userspace soon after the update of segments, and the
+ * 'rfi' instruction is used to return to userspace. However, hash-based cores
+ * (at least the G3) seem to exhibit random behaviour when the 'isync' is not
+ * there. 603 cores don't have this behaviour, so skip the 'isync' there and
+ * save several CPU cycles.
+ */
+.macro uus_isync
+#ifdef CONFIG_PPC_BOOK3S_604
+BEGIN_MMU_FTR_SECTION
+ isync
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
+.endm
+
+.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
+ uus_addi 1, \tmp2, \tmp1, 0x111
+ uus_addi 2, \tmp3, \tmp1, 0x222
+ uus_addi 3, \tmp4, \tmp1, 0x333
+
+ uus_mtsr 0, \tmp1
+ uus_mtsr 1, \tmp2
+ uus_mtsr 2, \tmp3
+ uus_mtsr 3, \tmp4
+
+ uus_addi 4, \tmp1, \tmp1, 0x444
+ uus_addi 5, \tmp2, \tmp2, 0x444
+ uus_addi 6, \tmp3, \tmp3, 0x444
+ uus_addi 7, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 4, \tmp1
+ uus_mtsr 5, \tmp2
+ uus_mtsr 6, \tmp3
+ uus_mtsr 7, \tmp4
+
+ uus_addi 8, \tmp1, \tmp1, 0x444
+ uus_addi 9, \tmp2, \tmp2, 0x444
+ uus_addi 10, \tmp3, \tmp3, 0x444
+ uus_addi 11, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 8, \tmp1
+ uus_mtsr 9, \tmp2
+ uus_mtsr 10, \tmp3
+ uus_mtsr 11, \tmp4
+
+ uus_addi 12, \tmp1, \tmp1, 0x444
+ uus_addi 13, \tmp2, \tmp2, 0x444
+ uus_addi 14, \tmp3, \tmp3, 0x444
+ uus_addi 15, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 12, \tmp1
+ uus_mtsr 13, \tmp2
+ uus_mtsr 14, \tmp3
+ uus_mtsr 15, \tmp4
+
+ uus_isync
+.endm
+
+#else
+
+/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note that if this
+ * mapping is changed, the hash functions will have to be changed to
+ * correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
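+/*
+ * Worked example (editor's addition): context 1, ESID 0 gives
+ * CTX_TO_VSID(1, 0) == (897 * 16) & 0xffffff == 0x3810, and each
+ * successive ESID within that context adds 0x111, so
+ * CTX_TO_VSID(1, 1) == 0x3921.
+ */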
+
+/*
+ * Hardware Page Table Entry
+ * Note that the xpn and x bitfields are used only by processors that
+ * support extended addressing; otherwise, those bits are reserved.
+ */
+struct hash_pte {
+ unsigned long v:1; /* Entry is valid */
+ unsigned long vsid:24; /* Virtual segment identifier */
+ unsigned long h:1; /* Hash algorithm indicator */
+ unsigned long api:6; /* Abbreviated page index */
+ unsigned long rpn:20; /* Real (physical) page number */
+ unsigned long xpn:3; /* Real page number bits 0-2, optional */
+ unsigned long r:1; /* Referenced */
+ unsigned long c:1; /* Changed */
+ unsigned long w:1; /* Write-thru cache mode */
+ unsigned long i:1; /* Cache inhibited */
+ unsigned long m:1; /* Memory coherence */
+ unsigned long g:1; /* Guarded */
+ unsigned long x:1; /* Real page number bit 3, optional */
+ unsigned long pp:2; /* Page protection */
+};
+
+typedef struct {
+ unsigned long id;
+ unsigned long sr0;
+ void __user *vdso;
+} mm_context_t;
+
+#ifdef CONFIG_PPC_KUEP
+#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
+#endif
+
+void update_bats(void);
+static inline void cleanup_cpu_mmu_context(void) { }
+
+/* patch sites */
+extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
+extern s32 patch__hash_page_B, patch__hash_page_C;
+extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+extern s32 patch__flush_hash_B;
+
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
+int __init find_free_bat(void);
+unsigned int bat_block_size(unsigned long base, unsigned long top);
+#endif /* !__ASSEMBLY__ */
+
+/* We happily ignore the smaller BATs on the 601; we don't actually use
+ * those definitions on hash32 at the moment anyway.
+ */
+#define mmu_virtual_psize MMU_PAGE_4K
+#define mmu_linear_psize MMU_PAGE_256M
+
+#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
new file mode 100644
index 000000000..dc5c039eb
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/slab.h>
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+/*
+ * We don't have any real pmds, and this code never triggers because
+ * the pgd will always be present.
+ */
+/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
+/* #define pgd_populate(mm, pmd, pte) BUG() */
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+ pte_t *pte)
+{
+ *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pte_page)
+{
+ *pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ if (!index_size) {
+ pte_fragment_free((unsigned long *)table, 0);
+ } else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
+}
+
+#define get_hugepd_cache_index(x) (x)
+
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
new file mode 100644
index 000000000..75823f39e
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -0,0 +1,620 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* And here we include common definitions */
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+
+#ifndef __ASSEMBLY__
+
+static inline bool pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * value as PAGE_SHIFT here (i.e., the PFN is naturally aligned).
+ * Platforms that differ pre-define the value themselves, so we don't override it here.
+ */
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#else
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/*
+ * Permission masks used to generate the __P and __S table.
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now.
+ */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table. The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
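+/*
+ * Worked example (editor's addition, assuming the usual PTE_SHIFT of
+ * PAGE_SHIFT - log2(sizeof(pte_t))): with 4K pages and 32-bit PTEs,
+ * PTE_INDEX_SIZE is 10, so PGDIR_SHIFT = 12 + 10 = 22 and each PGD
+ * entry maps 4MB; with 64-bit PTEs (CONFIG_PTE_64BIT) it is 9, giving
+ * PGDIR_SHIFT = 21 and 2MB per entry.
+ */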
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
+ * value (for now) on others; from here we can start laying out the
+ * kernel virtual space that goes below PKMAP and FIXMAP.
+ */
+#include <asm/fixmap.h>
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
+#else
+#define IOREMAP_TOP FIXADDR_START
+#endif
+
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START). For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system. This really does become a problem for machines with good amounts
+ * of RAM. -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define VMALLOC_END ioremap_bot
+#endif
+
+#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR (MODULES_END - SZ_256M)
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+ (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#define pte_clear(mm, addr, ptep) \
+ do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
+#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ *pmdp = __pmd(0);
+}
+
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+ unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+ unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
+ }
+}
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ */
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t old;
+
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ unsigned long tmp;
+
+ asm volatile(
+#ifndef CONFIG_PTE_64BIT
+ "1: lwarx %0, 0, %3\n"
+ " andc %1, %0, %4\n"
+#else
+ "1: lwarx %L0, 0, %3\n"
+ " lwz %0, -4(%3)\n"
+ " andc %1, %L0, %4\n"
+#endif
+ " or %1, %1, %5\n"
+ " stwcx. %1, 0, %3\n"
+ " bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+#ifndef CONFIG_PTE_64BIT
+ : "r" (p),
+#else
+ : "b" ((unsigned long)(p) + 4),
+#endif
+ "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ } else {
+ old = pte_val(*p);
+
+ *p = __pte((old & ~(pte_basic_t)clr) | set);
+ }
+
+ return old;
+}
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, so we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ if (old & _PAGE_HASHPTE)
+ flush_hash_entry(mm, ptep, addr);
+
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+
+ pte_update(vma->vm_mm, address, ptep, 0, set, 0);
+
+ flush_tlb_page(vma, address);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ * -- paulus
+ */
+#define __swp_type(entry) ((entry).val & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
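+/*
+ * Worked example (editor's addition): the << 3 keeps the swap bits clear
+ * of _PAGE_PRESENT, _PAGE_HASHPTE and _PAGE_USER (PTE bits 0-2). For
+ * type 2, offset 0x10:
+ *
+ *	__swp_entry(2, 0x10).val           == 0x202	// 2 | (0x10 << 5)
+ *	pte_val(__swp_entry_to_pte(entry)) == 0x1010	// 0x202 << 3
+ */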
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_read(pte_t pte) { return 1; }
+static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hashpte(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_HASHPTE);
+}
+
+static inline bool pte_ci(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_NO_CACHE);
+}
+
+/*
+ * We only find page table entries in the last level,
+ * hence no need for other accessors.
+ */
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ /*
+	 * A read-only access is controlled by the _PAGE_USER bit.
+	 * We have _PAGE_READ set for WRITE and EXECUTE.
+ */
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ return false;
+
+ if (write && !pte_write(pte))
+ return false;
+
+ return true;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+ pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_USER);
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now, but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+ /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+ * helper pte_update() which does an atomic update. We need to do that
+ * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+ * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+ * the hash bits instead (ie, same as the non-SMP case)
+ */
+ if (percpu)
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+ else
+ pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
+
+#elif defined(CONFIG_PTE_64BIT)
+ /* Second case is 32-bit with 64-bit PTE. In this case, we
+ * can just store as long as we do the two halves in the right order
+ * with a barrier in between. This is possible because we take care,
+ * in the hash code, to pre-invalidate if the PTE was already hashed,
+ * which synchronizes us with any concurrent invalidation.
+ * In the percpu case, we also fall back to the simple update preserving
+ * the hash bits.
+ */
+ if (percpu) {
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+ return;
+ }
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(mm, ptep, addr);
+ __asm__ __volatile__("\
+ stw%X0 %2,%0\n\
+ eieio\n\
+ stw%X1 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
+
+#else
+	/* Third case is a 32-bit hash table in UP mode; we need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
+	 * so we need to keep track that this PTE needs invalidating.
+ */
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+ return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+ return pgprot_noncached_wc(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 000000000..ba1743c52
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+void hash__flush_tlb_mm(struct mm_struct *mm);
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+void _tlbie(unsigned long address);
+#else
+static inline void _tlbie(unsigned long address)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#endif
+void _tlbia(void);
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ /* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ _tlbia();
+}
+
+static inline void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ start &= PAGE_MASK;
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_range(mm, start, end);
+ else if (end - start <= PAGE_SIZE)
+ _tlbie(start);
+ else
+ _tlbia();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_tlb_mm(mm);
+ else
+ _tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_tlb_page(vma, vmaddr);
+ else
+ _tlbie(vmaddr);
+}
+
+static inline void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ flush_range(vma->vm_mm, start, end);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ flush_range(&init_mm, start, end);
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
new file mode 100644
index 000000000..b6ac4f86c
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+
+#define H_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 4KB = 2MB
+#define H_PMD_INDEX_SIZE 7 // size: 8B << 7 = 1KB, maps: 2^7 x 2MB = 256MB
+#define H_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 256MB = 128GB
+#define H_PGD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 128GB = 64TB
+
+/*
+ * Each context is 512TB. But on 4k we restrict our max TASK size to 64TB,
+ * hence we also limit the max EA bits accordingly (46 bits = 64TB).
+ */
+#define MAX_EA_BITS_PER_CONTEXT 46
+
+
+/*
+ * Our page tables limit us to 64TB. For 64TB of physical memory we only need
+ * 64GB of vmemmap space. To better support sparse memory layouts, we use a
+ * 61TB linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap.
+ */
+#define REGION_SHIFT (40)
+#define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
+
+/*
+ * Limits the linear mapping range
+ */
+#define H_MAX_PHYSMEM_BITS 46
+
+/*
+ * Define the address range of the kernel non-linear virtual area (61TB)
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
+
+#ifndef __ASSEMBLY__
+#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
+#define H_PMD_TABLE_SIZE (sizeof(pmd_t) << H_PMD_INDEX_SIZE)
+#define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE)
+#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)
+
+#define H_PAGE_F_GIX_SHIFT _PAGE_PA_MAX
+#define H_PAGE_F_SECOND _RPAGE_PKEY_BIT0 /* HPTE is in 2ndary HPTEG */
+#define H_PAGE_F_GIX (_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
+#define H_PAGE_BUSY _RPAGE_RSV1
+#define H_PAGE_HASHPTE _RPAGE_PKEY_BIT4
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
+ H_PAGE_F_SECOND | H_PAGE_F_GIX)
+/*
+ * Not supported with the 4K Linux page size
+ */
+#define H_PAGE_4K_PFN 0x0
+#define H_PAGE_THP_HUGE 0x0
+#define H_PAGE_COMBO 0x0
+
+/* 8 bytes per each pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
+
+/* memory key bits, only 8 keys supported */
+#define H_PTE_PKEY_BIT4 0
+#define H_PTE_PKEY_BIT3 0
+#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT3
+#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT2
+#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT1
+
+
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
+ */
+#define remap_4k_pfn(vma, addr, pfn, prot) \
+ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hash__hugepd_ok(hugepd_t hpd)
+{
+ unsigned long hpdval = hpd_val(hpd);
+ /*
+	 * if it is not a pte and has the hugepd shift mask
+	 * set, then it is a hugepd directory pointer
+ */
+ if (!(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
+ return true;
+ return false;
+}
+#endif
+
+/*
+ * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
+ * a matter of returning the PTE bits that need to be modified. On 64K PTE,
+ * things are a little more involved and hence need many more parameters to
+ * accomplish the same. However, we want to abstract this out from the caller by
+ * keeping the prototype consistent across the two formats.
+ */
+static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
+ unsigned int subpg_index, unsigned long hidx,
+ int offset)
+{
+ return (hidx << H_PAGE_F_GIX_SHIFT) &
+ (H_PAGE_F_SECOND | H_PAGE_F_GIX);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+ BUG();
+ return NULL;
+}
+
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+ BUG();
+ return 0;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+ int index)
+{
+ BUG();
+ return 0;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+ unsigned int index, unsigned int hidx)
+{
+ BUG();
+}
+
+static inline int hash__pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ BUG();
+ return 0;
+}
+
+static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
+{
+ BUG();
+ return pmd;
+}
+
+extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ unsigned long clr, unsigned long set);
+extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
+extern int hash__has_transparent_hugepage(void);
+#endif
+
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ BUG();
+ return pmd;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
new file mode 100644
index 000000000..338e62fbe
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+
+#define H_PTE_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 64KB = 16MB
+#define H_PMD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16MB = 16GB
+#define H_PUD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
+#define H_PGD_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 16TB = 4PB
+
+/*
+ * If we store section details in page->flags, we can't increase MAX_PHYSMEM_BITS:
+ * increasing SECTIONS_WIDTH means node details no longer fit in page->flags, and
+ * page_to_nid() has to do a page->section->node lookup instead.
+ * Hence only increase the limit for VMEMMAP configs. Depending on SPARSEMEM_EXTREME,
+ * this further reduces the memory requirements when there is a large number of
+ * sections. 51 bits is the max physical real address on POWER9.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
+#define H_MAX_PHYSMEM_BITS 51
+#else
+#define H_MAX_PHYSMEM_BITS 46
+#endif
+
+/*
+ * Each context is 512TB in size. An SLB miss for the first/default context
+ * is handled in the hot path.
+ */
+#define MAX_EA_BITS_PER_CONTEXT 49
+#define REGION_SHIFT MAX_EA_BITS_PER_CONTEXT
+
+/*
+ * We use one context for each MAP area.
+ */
+#define H_KERN_MAP_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT)
+
+/*
+ * Define the address range of the kernel non-linear virtual area (2PB)
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xc008000000000000)
+
+/*
+ * A 64K-aligned address frees up a few of the lower RPN bits for us;
+ * we steal them here. For more details look at pte_pfn()/pfn_pte().
+ */
+#define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */
+#define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */
+#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */
+#define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */
+
+/* memory key bits. */
+#define H_PTE_PKEY_BIT4 _RPAGE_PKEY_BIT4
+#define H_PTE_PKEY_BIT3 _RPAGE_PKEY_BIT3
+#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT2
+#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT1
+#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT0
+
+/*
+ * We need to differentiate between an explicit huge page and a THP huge
+ * page, since a THP huge page also needs to track real subpage details.
+ */
+#define H_PAGE_THP_HUGE H_PAGE_4K_PFN
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
+/*
+ * We use a 2K PTE page fragment and another 2K for storing the
+ * real_pte_t hash index: 8 bytes per PTE entry plus another 8 bytes
+ * for the slot details.
+ */
+#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1)
+#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3 + 1)
+#else
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#endif
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
+
+#ifndef __ASSEMBLY__
+#include <asm/errno.h>
+
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
+{
+ real_pte_t rpte;
+ unsigned long *hidxp;
+
+ rpte.pte = pte;
+
+ /*
+	 * Ensure that we do not read the hidx before we read the PTE: the
+	 * writer side is expected to finish writing the hidx first, followed
+	 * by the PTE, separated by an smp_wmb(). pte_set_hidx() ensures that.
+ */
+ smp_rmb();
+
+ hidxp = (unsigned long *)(ptep + offset);
+ rpte.hidx = *hidxp;
+ return rpte;
+}
+
+/*
+ * Shift the hidx representation by one-modulo-0xf; i.e. hidx 0 is represented
+ * as 1, 1 as 2, ..., and 0xf as 0. This convention lets us represent an
+ * invalid hidx 0xf with a 0x0 bit value. PTEs are anyway zeroed when
+ * allocated; we don't have to zero them again, thus saving on initialization.
+ */
+#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
+#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL) /* shift forward by one */
+#define HIDX_BITS(x, index) (x << (index << 2))
+#define BITS_TO_HIDX(x, index) ((x >> (index << 2)) & 0xfUL)
+#define INVALID_RPTE_HIDX 0x0UL
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+ return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
+}
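+
+/*
+ * Editor's illustration (hypothetical helper, not kernel API): the
+ * one-modulo-0xf convention above round-trips, and an all-zero hidx word
+ * therefore decodes every subpage slot as the invalid index 0xf, which is
+ * why freshly zeroed PTEs need no further initialization.
+ */
+static inline unsigned long hidx_convention_example(void)
+{
+	unsigned long stored = HIDX_SHIFT_BY_ONE(0xfUL);	/* 0xf encodes as 0x0 */
+
+	return HIDX_UNSHIFT_BY_ONE(stored);	/* == 0xf, i.e. invalid again */
+}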
+
+/*
+ * Commit the hidx and return the PTE bits that need to be modified. The caller is
+ * expected to modify the PTE bits accordingly and commit the PTE to memory.
+ */
+static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
+ unsigned int subpg_index,
+ unsigned long hidx, int offset)
+{
+ unsigned long *hidxp = (unsigned long *)(ptep + offset);
+
+ rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
+ *hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
+
+ /*
+	 * Anyone reading the PTE must ensure the hidx bits are read after
+	 * reading the PTE, by using the read-side barrier smp_rmb().
+	 * __real_pte() can be used for that.
+ */
+ smp_wmb();
+
+ /* No PTE bits to be modified, return 0x0UL */
+ return 0x0UL;
+}
+
+#define __rpte_to_pte(r) ((r).pte)
+extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
+/*
+ * Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well, since we want only one iteration there.
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
+ do { \
+ unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
+ unsigned __split = (psize == MMU_PAGE_4K || \
+ psize == MMU_PAGE_64K_AP); \
+ shift = mmu_psize_defs[psize].shift; \
+ for (index = 0; vpn < __end; index++, \
+ vpn += (1L << (shift - VPN_SHIFT))) { \
+ if (!__split || __rpte_sub_valid(rpte, index))
+
+#define pte_iterate_hashed_end() } } while(0)
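+
+/*
+ * Editor's usage sketch (hypothetical helper, not kernel code): walking the
+ * valid 4K subpages of one 64K PTE with the iterator above. Assumes
+ * mmu_psize_defs and the MMU_PAGE_* constants are visible, as they are for
+ * the real callers in the hash MMU code.
+ */
+static inline int count_valid_subpages_example(real_pte_t rpte,
+					       unsigned long vpn, int psize)
+{
+	unsigned long shift;
+	int index, n = 0;
+
+	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
+		n++;	/* vpn/index here describe one valid subpage */
+	} pte_iterate_hashed_end();
+
+	return n;
+}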
+
+#define pte_pagesize_index(mm, addr, pte) \
+ (((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
+{
+ if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
+ WARN(1, "remap_4k_pfn called with wrong pfn value\n");
+ return -EINVAL;
+ }
+ return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
+ __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
+}
+
+#define H_PTE_TABLE_SIZE PTE_FRAG_SIZE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
+#define H_PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
+ (sizeof(unsigned long) << PMD_INDEX_SIZE))
+#else
+#define H_PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE ((sizeof(pud_t) << PUD_INDEX_SIZE) + \
+ (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
+#define H_PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
+#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+ /*
+	 * The hpte hidx is stored in the pgtable whose address is in the
+	 * second half of the PMD.
+ *
+ * Order this load with the test for pmd_trans_huge in the caller
+ */
+ smp_rmb();
+ return *(char **)(pmdp + PTRS_PER_PMD);
+}
+/*
+ * The Linux hugepage PMD now includes the pmd entries followed by the address
+ * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
+ * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use one byte per
+ * HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries, and
+ * with 4K HPTEs we need 4096 entries. Both fit in a 4K pgtable_t.
+ *
+ * The top three bits are intentionally left as zero. This memory location
+ * is also used for normal page PTE pointers, so if we have any pointers
+ * left around while we collapse a hugepage, we need to make sure the
+ * _PAGE_PRESENT bit of those is zero when we look at them.
+ */
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+ return hpte_slot_array[index] & 0x1;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+ int index)
+{
+ return hpte_slot_array[index] >> 1;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+ unsigned int index, unsigned int hidx)
+{
+ hpte_slot_array[index] = (hidx << 1) | 0x1;
+}
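+
+/*
+ * Editor's illustration (hypothetical helper): one round trip through the
+ * byte encoding used above, [ secondary | hidx | valid ] packed into one
+ * byte per HPTE.
+ */
+static inline unsigned int hpte_slot_array_example(unsigned char *hpte_slot_array)
+{
+	mark_hpte_slot_valid(hpte_slot_array, 0, 0x5);	/* store hidx 0x5, valid */
+	/* hpte_valid(hpte_slot_array, 0) now returns 1 */
+	return hpte_hash_index(hpte_slot_array, 0);	/* == 0x5 */
+}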
+
+/*
+ * For core kernel code, by design, pmd_trans_huge() is never run on any hugetlbfs
+ * page. The hugetlbfs page table walking and mangling paths are totally
+ * separated from the core VM paths and they're differentiated by
+ * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge() could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
+ * time in that case.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ */
+static inline int hash__pmd_trans_huge(pmd_t pmd)
+{
+ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
+ (_PAGE_PTE | H_PAGE_THP_HUGE));
+}
+
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
+static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
+}
+
+extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ unsigned long clr, unsigned long set);
+extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
+extern int hash__has_transparent_hugepage(void);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
new file mode 100644
index 000000000..f1e60d579
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
+
+/* We use key 3 for KERNEL */
+#define HASH_DEFAULT_KERNEL_KEY (HPTE_R_KEY_BIT0 | HPTE_R_KEY_BIT1)
+
+static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)
+{
+ return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL));
+}
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
+{
+ unsigned long pte_pkey;
+
+ pte_pkey = (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL));
+
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) ||
+ mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY))
+ return HASH_DEFAULT_KERNEL_KEY;
+ }
+
+ return pte_pkey;
+}
+
+static inline u16 hash__pte_to_pkey_bits(u64 pteflags)
+{
+ return (((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT3) ? 0x8 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT1) ? 0x2 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT0) ? 0x1 : 0x0UL));
+}
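+
+/*
+ * Editor's illustration (hypothetical helper): the default kernel key is
+ * key 3, so the matching PTE pkey bits decode back to the key number 3.
+ */
+static inline u16 hash_pkey_example(void)
+{
+	return hash__pte_to_pkey_bits(H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT0);	/* == 0x3 */
+}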
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
new file mode 100644
index 000000000..17e7a778c
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_H
+#ifdef __KERNEL__
+
+#include <asm/asm-const.h>
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * Additional bits may be defined in pgtable-hash64-*.h.
+ */
+#define H_PTE_NONE_MASK _PAGE_HPTEFLAGS
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/hash-64k.h>
+#else
+#include <asm/book3s/64/hash-4k.h>
+#endif
+
+#define H_PTRS_PER_PTE (1 << H_PTE_INDEX_SIZE)
+#define H_PTRS_PER_PMD (1 << H_PMD_INDEX_SIZE)
+#define H_PTRS_PER_PUD (1 << H_PUD_INDEX_SIZE)
+
+/* Bits to set in a PMD/PUD/PGD entry valid bit */
+#define HASH_PMD_VAL_BITS (0x8000000000000000UL)
+#define HASH_PUD_VAL_BITS (0x8000000000000000UL)
+#define HASH_PGD_VAL_BITS (0x8000000000000000UL)
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define H_PGTABLE_EADDR_SIZE (H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
+ H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
+#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
+/*
+ * Top 2 bits are ignored in page table walk.
+ */
+#define EA_MASK (~(0xcUL << 60))
+
+/*
+ * We store the slot details in the second half of the page table.
+ * Increase the pud level table size so that hugetlb ptes can be stored
+ * at the pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE)
+#endif
+
+/*
+ * +------------------------------+
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
+ * |                              |
+ * |                              |
+ * |    512TB/16TB of vmemmap     |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel vmemmap start
+ * |                              |
+ * |    512TB/16TB of IO map      |
+ * |                              |
+ * +------------------------------+  Kernel IO map start
+ * |                              |
+ * |    512TB/16TB of vmap        |
+ * |                              |
+ * +------------------------------+  Kernel virt start (0xc008000000000000)
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel linear (0xc.....)
+ */
+
+#define H_VMALLOC_START H_KERN_VIRT_START
+#define H_VMALLOC_SIZE H_KERN_MAP_SIZE
+#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE)
+
+#define H_KERN_IO_START H_VMALLOC_END
+#define H_KERN_IO_SIZE H_KERN_MAP_SIZE
+#define H_KERN_IO_END (H_KERN_IO_START + H_KERN_IO_SIZE)
+
+#define H_VMEMMAP_START H_KERN_IO_END
+#define H_VMEMMAP_SIZE H_KERN_MAP_SIZE
+#define H_VMEMMAP_END (H_VMEMMAP_START + H_VMEMMAP_SIZE)
+
+#define NON_LINEAR_REGION_ID(ea) ((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)
+
+/*
+ * Region IDs
+ */
+#define USER_REGION_ID 0
+#define LINEAR_MAP_REGION_ID 1
+#define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(H_VMALLOC_START)
+#define IO_REGION_ID NON_LINEAR_REGION_ID(H_KERN_IO_START)
+#define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(H_VMEMMAP_START)
+#define INVALID_REGION_ID (VMEMMAP_REGION_ID + 1)
+
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs.
+ */
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY 0x8
+#define _PTEIDX_GROUP_IX 0x7
+
+#define H_PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define H_PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
+#ifndef __ASSEMBLY__
+static inline int get_region_id(unsigned long ea)
+{
+ int region_id;
+ int id = (ea >> 60UL);
+
+ if (id == 0)
+ return USER_REGION_ID;
+
+ if (id != (PAGE_OFFSET >> 60))
+ return INVALID_REGION_ID;
+
+ if (ea < H_KERN_VIRT_START)
+ return LINEAR_MAP_REGION_ID;
+
+ BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
+
+ region_id = NON_LINEAR_REGION_ID(ea);
+ return region_id;
+}
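+
+/*
+ * Editor's illustration (hypothetical helper), assuming the usual
+ * PAGE_OFFSET of 0xc000000000000000: a user EA, the start of the linear
+ * map, and H_VMALLOC_START each land in a different region.
+ */
+static inline int region_id_example(void)
+{
+	return get_region_id(0x0000000010000000UL) == USER_REGION_ID &&
+	       get_region_id(0xc000000000000000UL) == LINEAR_MAP_REGION_ID &&
+	       get_region_id(H_VMALLOC_START) == VMALLOC_REGION_ID;	/* all true */
+}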
+
+#define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS)
+#define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS)
+static inline int hash__p4d_bad(p4d_t p4d)
+{
+ return (p4d_val(p4d) == 0);
+}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
+#endif
+
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long pte, int huge);
+unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags);
+/* Atomic PTE updates */
+static inline unsigned long hash__pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ unsigned long set,
+ int huge)
+{
+ __be64 old_be, tmp_be;
+ unsigned long old;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # pte_update\n\
+ and. %1,%0,%6\n\
+ bne- 1b \n\
+ andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
+ stdcx. %1,0,%3 \n\
+ bne- 1b"
+ : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
+ : "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
+ "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
+ : "cc" );
+ /* huge pages use the old page table lock */
+ if (!huge)
+ assert_pte_locked(mm, addr);
+
+ old = be64_to_cpu(old_be);
+ if (old & H_PAGE_HASHPTE)
+ hpte_need_flush(mm, addr, ptep, old, huge);
+
+ return old;
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE; this
+ * function doesn't need to flush the hash entry.
+ */
+static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+ __be64 old, tmp, val, mask;
+
+ mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
+ _PAGE_EXEC | _PAGE_SOFT_DIRTY);
+
+ val = pte_raw(entry) & mask;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%4\n\
+ and. %1,%0,%6\n\
+ bne- 1b \n\
+ or %0,%3,%0\n\
+ stdcx. %0,0,%4\n\
+ bne- 1b"
+ :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+ :"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
+ :"cc");
+}
+
+static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
+static inline int hash__pte_none(pte_t pte)
+{
+ return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
+}
+
+unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
+ int ssize, real_pte_t rpte, unsigned int subpg_index);
+
+/* This low level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+ /*
+	 * Just store the PTE normally; that covers all 64-bit
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
+ */
+ *ptep = pte;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, unsigned long old_pmd);
+#else
+static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ unsigned long old_pmd)
+{
+ WARN(1, "%s called with THP disabled\n", __func__);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
+extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys);
+extern void hash__vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
new file mode 100644
index 000000000..aa1c67c8b
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+
+#include <asm/firmware.h>
+
+/*
+ * For radix we want generic code to handle hugetlb. But then if we want
+ * both hash and radix to be enabled together we need to work around the
+ * limitations.
+ */
+void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+ unsigned long shift;
+
+ shift = huge_page_shift(hstate);
+ if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+ return MMU_PAGE_2M;
+ else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+ return MMU_PAGE_1G;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+ return MMU_PAGE_16M;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+ return MMU_PAGE_16G;
+ else {
+ WARN(1, "Wrong huge page shift\n");
+ return mmu_virtual_psize;
+ }
+}
+
+#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
+{
+ /*
+	 * We use gigantic page reservation with hypervisor assist in some
+	 * cases. We cannot use runtime allocation of gigantic pages on those
+	 * platforms, i.e. LPARs running in hash translation mode.
+ */
+ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
+ return false;
+
+ return true;
+}
+
+/* hugepd entry valid bit */
+#define HUGEPD_VAL_BITS (0x8000000000000000UL)
+
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t new_pte);
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64-bit book3s.
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ /*
+	 * We have only four bits to encode the MMU page size.
+ */
+ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ if (radix_enabled())
+ return radix__flush_hugetlb_page(vma, vmaddr);
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
+
+ return hugepd_page(hpd) + idx;
+}
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2));
+}
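+
+/*
+ * Editor's sketch (hypothetical helper): the page-size field stored by
+ * hugepd_populate() round-trips through hugepd_shift(), provided pshift is
+ * one of the supported MMU page-size shifts.
+ */
+static inline unsigned int hugepd_shift_roundtrip_example(pte_t *new,
+							  unsigned int pshift)
+{
+	hugepd_t hpd;
+
+	hugepd_populate(&hpd, new, pshift);
+	return hugepd_shift(hpd);	/* == pshift for supported sizes */
+}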
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ int mmu_psize;
+
+ if (shift > SLICE_HIGH_SHIFT)
+ return -EINVAL;
+
+ mmu_psize = shift_to_mmu_psize(shift);
+
+ /*
+ * We need to make sure that for different page sizes reported by
+ * firmware we only add hugetlb support for page sizes that can be
+	 * supported by the Linux page table layout.
+ * For now we have
+ * Radix: 2M and 1G
+ * Hash: 16M and 16G
+ */
+ if (radix_enabled()) {
+ if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+ return -EINVAL;
+ } else {
+ if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
+ return -EINVAL;
+ }
+ return mmu_psize;
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h
new file mode 100644
index 000000000..d4b9d476e
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kexec.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+
+#include <asm/plpar_wrappers.h>
+
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ mtspr(SPRN_AMR, 0);
+ mtspr(SPRN_UAMOR, 0);
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_IAMR, 0);
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mtspr(SPRN_CIABR, 0);
+ else
+ plpar_set_ciabr(0);
+ }
+
+ /* Do we need isync()? We are going via a kexec reset */
+ isync();
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
new file mode 100644
index 000000000..54cf46808
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_H
+
+#include <linux/const.h>
+#include <asm/reg.h>
+
+#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
+#define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
+#define AMR_KUEP_BLOCKED UL(0x5455555555555555)
+#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_user_restore gpr1, gpr2
+#if defined(CONFIG_PPC_PKEY)
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ b 100f // skip_restore_amr
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
+ /*
+ * AMR and IAMR are going to be different when
+ * returning to userspace.
+ */
+ ld \gpr1, STACK_REGS_AMR(r1)
+
+ /*
+ * If kuap feature is not enabled, do the mtspr
+ * only if AMR value is different.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(68)
+ mfspr \gpr2, SPRN_AMR
+ cmpd \gpr1, \gpr2
+ beq 99f
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)
+
+ isync
+ mtspr SPRN_AMR, \gpr1
+99:
+ /*
+ * Restore IAMR only when returning to userspace
+ */
+ ld \gpr1, STACK_REGS_IAMR(r1)
+
+ /*
+ * If kuep feature is not enabled, do the mtspr
+ * only if IAMR value is different.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(69)
+ mfspr \gpr2, SPRN_IAMR
+ cmpd \gpr1, \gpr2
+ beq 100f
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)
+
+ isync
+ mtspr SPRN_IAMR, \gpr1
+
+100: //skip_restore_amr
+ /* No isync required, see kuap_user_restore() */
+#endif
+.endm
+
+.macro kuap_kernel_restore gpr1, gpr2
+#if defined(CONFIG_PPC_PKEY)
+
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ /*
+ * AMR is going to be mostly the same since we are
+ * returning to the kernel. Compare and do a mtspr.
+ */
+ ld \gpr2, STACK_REGS_AMR(r1)
+ mfspr \gpr1, SPRN_AMR
+ cmpd \gpr1, \gpr2
+ beq 100f
+ isync
+ mtspr SPRN_AMR, \gpr2
+ /*
+ * No isync required, see kuap_restore_amr()
+ * No need to restore IAMR when returning to kernel space.
+ */
+100:
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+#endif
+.endm
+
+#ifdef CONFIG_PPC_KUAP
+.macro kuap_check_amr gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP_DEBUG
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ mfspr \gpr1, SPRN_AMR
+ /* Prevent access to userspace using any key values */
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
+999: tdne \gpr1, \gpr2
+ EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+#endif
+.endm
+#endif
+
+/*
+ * if (pkey) {
+ *
+ * save AMR -> stack;
+ * if (kuap) {
+ * if (AMR != BLOCKED)
+ * KUAP_BLOCKED -> AMR;
+ * }
+ * if (from_user) {
+ * save IAMR -> stack;
+ * if (kuep) {
+ * KUEP_BLOCKED -> IAMR;
+ * }
+ * }
+ * return;
+ * }
+ *
+ * if (kuap) {
+ * if (from_kernel) {
+ * save AMR -> stack;
+ * if (AMR != BLOCKED)
+ * KUAP_BLOCKED -> AMR;
+ * }
+ *
+ * }
+ */
+.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+#if defined(CONFIG_PPC_PKEY)
+
+ /*
+	 * If both pkey and kuap are disabled, nothing to do.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(68)
+ b 100f // skip_save_amr
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
+
+ /*
+	 * If pkey is disabled and we are entering from userspace,
+	 * don't do anything.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ .ifnb \msr_pr_cr
+ /*
+ * Without pkey we are not changing AMR outside the kernel
+ * hence skip this completely.
+ */
+ bne \msr_pr_cr, 100f // from userspace
+ .endif
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
+
+ /*
+ * pkey is enabled or pkey is disabled but entering from kernel
+ */
+ mfspr \gpr1, SPRN_AMR
+ std \gpr1, STACK_REGS_AMR(r1)
+
+ /*
+ * update kernel AMR with AMR_KUAP_BLOCKED only
+ * if KUAP feature is enabled
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(69)
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
+ cmpd \use_cr, \gpr1, \gpr2
+ beq \use_cr, 102f
+ /*
+ * We don't isync here because we very recently entered via an interrupt
+ */
+ mtspr SPRN_AMR, \gpr2
+ isync
+102:
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
+
+ /*
+	 * If entering from the kernel we don't need to save IAMR.
+ */
+ .ifnb \msr_pr_cr
+ beq \msr_pr_cr, 100f // from kernel space
+ mfspr \gpr1, SPRN_IAMR
+ std \gpr1, STACK_REGS_IAMR(r1)
+
+ /*
+ * update kernel IAMR with AMR_KUEP_BLOCKED only
+ * if KUEP feature is enabled
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(70)
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
+ mtspr SPRN_IAMR, \gpr2
+ isync
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
+ .endif
+
+100: // skip_save_amr
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
+#ifdef CONFIG_PPC_PKEY
+
+extern u64 __ro_after_init default_uamor;
+extern u64 __ro_after_init default_amr;
+extern u64 __ro_after_init default_iamr;
+
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+
+/* Usage of kthread_use_mm() should inherit the
+ * AMR value of the operating address space. But the AMR value is
+ * thread-specific: we inherit the address space and not the thread's
+ * access restrictions. Because of this, ignore the AMR value when
+ * accessing userspace via a kernel thread.
+ */
+static inline u64 current_thread_amr(void)
+{
+ if (current->thread.regs)
+ return current->thread.regs->amr;
+ return default_amr;
+}
+
+static inline u64 current_thread_iamr(void)
+{
+ if (current->thread.regs)
+ return current->thread.regs->iamr;
+ return default_iamr;
+}
+#endif /* CONFIG_PPC_PKEY */
+
+#ifdef CONFIG_PPC_KUAP
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+ bool restore_amr = false, restore_iamr = false;
+ unsigned long amr, iamr;
+
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return;
+
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ amr = mfspr(SPRN_AMR);
+ if (amr != regs->amr)
+ restore_amr = true;
+ } else {
+ restore_amr = true;
+ }
+
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ iamr = mfspr(SPRN_IAMR);
+ if (iamr != regs->iamr)
+ restore_iamr = true;
+ } else {
+ restore_iamr = true;
+ }
+
+ if (restore_amr || restore_iamr) {
+ isync();
+ if (restore_amr)
+ mtspr(SPRN_AMR, regs->amr);
+ if (restore_iamr)
+ mtspr(SPRN_IAMR, regs->iamr);
+ }
+ /*
+ * No isync required here because we are about to rfi
+ * back to previous context before any user accesses
+ * would be made, which is a CSI.
+ */
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+{
+ if (likely(regs->amr == amr))
+ return;
+
+ isync();
+ mtspr(SPRN_AMR, regs->amr);
+ /*
+ * No isync required here because we are about to rfi
+ * back to previous context before any user accesses
+ * would be made, which is a CSI.
+ *
+ * No need to restore IAMR when returning to kernel space.
+ */
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long amr = mfspr(SPRN_AMR);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+ WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+ return amr;
+}
+
+/* Do nothing, book3s/64 does that in ASM */
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+}
+
+/*
+ * We support individually allowing read or write, but we don't support nesting
+ * because that would require an expensive read/modify write of the AMR.
+ */
+
+static inline unsigned long get_kuap(void)
+{
+ /*
+ * We return AMR_KUAP_BLOCKED when we don't support KUAP because
+ * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
+ * cause restore_user_access to do a flush.
+ *
+ * This has no effect in terms of actually blocking things on hash,
+ * so it doesn't break anything.
+ */
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ return AMR_KUAP_BLOCKED;
+
+ return mfspr(SPRN_AMR);
+}
+
+static __always_inline void set_kuap(unsigned long value)
+{
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ return;
+
+ /*
+ * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
+ * before and after the move to AMR. See table 6 on page 1134.
+ */
+ isync();
+ mtspr(SPRN_AMR, value);
+ isync();
+}
+
+static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ /*
+ * For radix this will be a storage protection fault (DSISR_PROTFAULT).
+ * For hash this will be a key fault (DSISR_KEYFAULT)
+ */
+ /*
+	 * We do have an exception table entry, but accessing the
+	 * userspace results in a fault. This could be because we
+	 * didn't unlock the AMR or the access is denied by userspace
+ * using a key value that blocks access. We are only interested
+ * in catching the use case of accessing without unlocking
+ * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
+ */
+ if (is_write) {
+ return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
+ }
+ return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
+}
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ unsigned long thread_amr = 0;
+
+ // This is written so we can resolve to a single case at build time
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (mmu_has_feature(MMU_FTR_PKEY))
+ thread_amr = current_thread_amr();
+
+ if (dir == KUAP_READ)
+ set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
+ else if (dir == KUAP_WRITE)
+ set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
+ else if (dir == KUAP_READ_WRITE)
+ set_kuap(thread_amr);
+ else
+ BUILD_BUG();
+}
+
+#else /* CONFIG_PPC_KUAP */
+
+static inline unsigned long get_kuap(void)
+{
+ return AMR_KUAP_BLOCKED;
+}
+
+static inline void set_kuap(unsigned long value) { }
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{ }
+
+#endif /* !CONFIG_PPC_KUAP */
+
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
+}
+
+static inline unsigned long prevent_user_access_return(void)
+{
+ unsigned long flags = get_kuap();
+
+ set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+ set_kuap(flags);
+ if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
+ do_uaccess_flush();
+}
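+
+/*
+ * Editor's usage sketch (hypothetical, not a real caller): the expected
+ * pairing of the helpers above around a userspace access. Assumes the
+ * KUAP_* direction constants from <asm/kup.h>.
+ */
+static inline void kuap_usage_example(void __user *to, const void __user *from,
+				      unsigned long size)
+{
+	unsigned long flags;
+
+	/* Open a user-access window, do the access, close it again. */
+	allow_user_access(to, from, size, KUAP_READ_WRITE);
+	/* ... the actual copy to/from userspace happens here ... */
+	prevent_user_access(KUAP_READ_WRITE);
+
+	/* Or: block access and later restore whatever state was current. */
+	flags = prevent_user_access_return();
+	/* ... code that must not touch userspace ... */
+	restore_user_access(flags);
+}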
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
new file mode 100644
index 000000000..1c4eebbc6
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -0,0 +1,885 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
+/*
+ * PowerPC64 memory management structures
+ *
+ * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
+ * PPC64 rework.
+ */
+
+#include <asm/page.h>
+#include <asm/bug.h>
+#include <asm/asm-const.h>
+
+/*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slice-related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/book3s/64/pgtable.h>
+#include <asm/book3s/64/slice.h>
+#include <asm/task_size_64.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * SLB
+ */
+
+#define SLB_NUM_BOLTED 2
+#define SLB_CACHE_ENTRIES 8
+#define SLB_MIN_SIZE 32
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT 12
+#define SLB_VSID_SHIFT_256M SLB_VSID_SHIFT
+#define SLB_VSID_SHIFT_1T 24
+#define SLB_VSID_SSIZE_SHIFT 62
+#define SLB_VSID_B ASM_CONST(0xc000000000000000)
+#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
+#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
+#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
+#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
+#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
+#define SLB_VSID_L ASM_CONST(0x0000000000000100)
+#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
+#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
+#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
+#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
+#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
+
+#define SLB_VSID_KERNEL (SLB_VSID_KP)
+#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C (0x08000000)
+#define SLBIE_SSIZE_SHIFT 25
+
+/*
+ * Hash table
+ */
+
+#define HPTES_PER_GROUP 8
+
+#define HPTE_V_SSIZE_SHIFT 62
+#define HPTE_V_AVPN_SHIFT 7
+#define HPTE_V_COMMON_BITS ASM_CONST(0x000fffffffffffff)
+#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0 ASM_CONST(0x000fffffffffff80)
+#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
+#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
+
+/*
+ * ISA 3.0 has a different HPTE format.
+ */
+#define HPTE_R_3_0_SSIZE_SHIFT 58
+#define HPTE_R_3_0_SSIZE_MASK (3ull << HPTE_R_3_0_SSIZE_SHIFT)
+#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
+#define HPTE_R_KEY_BIT4 ASM_CONST(0x2000000000000000)
+#define HPTE_R_KEY_BIT3 ASM_CONST(0x1000000000000000)
+#define HPTE_R_RPN_SHIFT 12
+#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
+#define HPTE_R_PP ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
+#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_G ASM_CONST(0x0000000000000008)
+#define HPTE_R_M ASM_CONST(0x0000000000000010)
+#define HPTE_R_I ASM_CONST(0x0000000000000020)
+#define HPTE_R_W ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
+#define HPTE_R_C ASM_CONST(0x0000000000000080)
+#define HPTE_R_R ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
+#define HPTE_R_KEY_BIT2 ASM_CONST(0x0000000000000800)
+#define HPTE_R_KEY_BIT1 ASM_CONST(0x0000000000000400)
+#define HPTE_R_KEY_BIT0 ASM_CONST(0x0000000000000200)
+#define HPTE_R_KEY (HPTE_R_KEY_LO | HPTE_R_KEY_HI)
+
+#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
+#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
+
+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
+#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
+#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
+#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT 12
+
+#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
+#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
+#define POWER9_TLB_SETS_HASH 256 /* # sets in POWER9 TLB Hash mode */
+#define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */
+
+#ifndef __ASSEMBLY__
+
+struct mmu_hash_ops {
+ void (*hpte_invalidate)(unsigned long slot,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, int local);
+ long (*hpte_updatepp)(unsigned long slot,
+ unsigned long newpp,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, unsigned long flags);
+ void (*hpte_updateboltedpp)(unsigned long newpp,
+ unsigned long ea,
+ int psize, int ssize);
+ long (*hpte_insert)(unsigned long hpte_group,
+ unsigned long vpn,
+ unsigned long prpn,
+ unsigned long rflags,
+ unsigned long vflags,
+ int psize, int apsize,
+ int ssize);
+ long (*hpte_remove)(unsigned long hpte_group);
+ int (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+ void (*hugepage_invalidate)(unsigned long vsid,
+ unsigned long addr,
+ unsigned char *hpte_slot_array,
+ int psize, int ssize, int local);
+ int (*resize_hpt)(unsigned long shift);
+ /*
+ * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken; as such, concurrent access on pre-POWER5 hardware could
+	 * result in a deadlock.
+ * The linear mapping is destroyed as well.
+ */
+ void (*hpte_clear_all)(void);
+};
+extern struct mmu_hash_ops mmu_hash_ops;
+
+struct hash_pte {
+ __be64 v;
+ __be64 r;
+};
+
+extern struct hash_pte *htab_address;
+extern unsigned long htab_size_bytes;
+extern unsigned long htab_hash_mask;
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
+static inline unsigned int ap_to_shift(unsigned long ap)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ if (mmu_psize_defs[psize].ap == ap)
+ return mmu_psize_defs[psize].shift;
+ }
+
+ return -1;
+}
+
+static inline unsigned long get_sllp_encoding(int psize)
+{
+ unsigned long sllp;
+
+ sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
+ return sllp;
+}
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Segment sizes.
+ * These are the values used by hardware in the B field of
+ * SLB entries and the first dword of MMU hashtable entries.
+ * The B field is 2 bits; the values 2 and 3 are unused and reserved.
+ */
+#define MMU_SEGSIZE_256M 0
+#define MMU_SEGSIZE_1T 1
+
+/*
+ * Encode the page number shift.
+ * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
+ * 12 bits. This enables us to address up to a 76-bit VA.
+ * For the hpt hash from a VA we can ignore the page size bits of the VA, and
+ * for hpte encoding we ignore up to 23 bits of the VA. So ignoring the lower
+ * 12 bits ensures we work in all cases, including 4K page size.
+ */
+#define VPN_SHIFT 12
+
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT 12
+#define LP_BITS 8
+#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+static inline int slb_vsid_shift(int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return SLB_VSID_SHIFT;
+ return SLB_VSID_SHIFT_1T;
+}
+
+static inline int segment_shift(int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return SID_SHIFT;
+ return SID_SHIFT_1T;
+}
+
+/*
+ * This array is indexed by the LP field of the HPTE second dword.
+ * Since this field may contain some RPN bits, some entries are
+ * replicated so that we get the same value irrespective of RPN.
+ * The top 4 bits are the page size index (MMU_PAGE_*) for the
+ * actual page size, the bottom 4 bits are the base page size.
+ */
+extern u8 hpte_page_sizes[1 << LP_BITS];
+
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+ bool is_base_size)
+{
+ unsigned int i, lp;
+
+ if (!(h & HPTE_V_LARGE))
+ return 1ul << 12;
+
+ /* Look at the 8 bit LP value */
+ lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ i = hpte_page_sizes[lp];
+ if (!i)
+ return 0;
+ if (!is_base_size)
+ i >>= 4;
+ return 1ul << mmu_psize_defs[i & 0xf].shift;
+}
+
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 1);
+}
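+
+/*
+ * Editor's illustration (hypothetical helper): with HPTE_V_LARGE clear the
+ * decode above short-circuits to a 4K page, whatever the second dword holds.
+ */
+static inline unsigned long hpte_page_size_example(void)
+{
+	return hpte_page_size(0, 0);	/* == 1ul << 12 */
+}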
+
+/*
+ * The current system page and segment sizes
+ */
+extern int mmu_kernel_ssize;
+extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
+extern unsigned long tce_alloc_start, tce_alloc_end;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
+
+/*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE. The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+ int ssize)
+{
+ unsigned long v;
+ /*
+	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
+	 * These bits are not needed in the PTE, because the
+	 * low-order b of these bits are part of the byte offset
+	 * into the virtual page and, if b < 23, the high-order
+	 * 23-b of these bits are always used in selecting the
+	 * PTEGs to be searched.
+ */
+ v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
+ v <<= HPTE_V_AVPN_SHIFT;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ return v;
+}
+
+/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+ /* trim AVPN, drop B */
+ return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+ /* move B field from 1st to 2nd dword, trim ARPN */
+ return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+ (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+ /* insert B field */
+ return (v & HPTE_V_COMMON_BITS) |
+ ((r & HPTE_R_3_0_SSIZE_MASK) <<
+ (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+ /* clear out B field */
+ return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
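+
+/*
+ * Editor's illustration (hypothetical helper): the B (segment size) field
+ * moves from bits 63:62 of the old-format first dword to bits 59:58 of the
+ * new-format second dword.
+ */
+static inline unsigned long hpte_format_example(void)
+{
+	unsigned long v_old = (unsigned long)MMU_SEGSIZE_1T << HPTE_V_SSIZE_SHIFT;
+
+	return hpte_old_to_new_r(v_old, 0);	/* == 1ul << HPTE_R_3_0_SSIZE_SHIFT */
+}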
+
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
+/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * using the base page size and actual page size.
+ */
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+ int actual_psize, int ssize)
+{
+ unsigned long v;
+ v = hpte_encode_avpn(vpn, base_psize, ssize);
+ if (actual_psize != MMU_PAGE_4K)
+ v |= HPTE_V_LARGE;
+ return v;
+}
+
+/*
+ * This function sets the ARPN and LP fields of the HPTE appropriately
+ * for the page size. We assume the pa is already "clean", that is, properly
+ * aligned for the requested page size.
+ */
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+ int actual_psize)
+{
+ /* A 4K page needs no special encoding */
+ if (actual_psize == MMU_PAGE_4K)
+ return pa & HPTE_R_RPN;
+ else {
+ unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+ unsigned int shift = mmu_psize_defs[actual_psize].shift;
+ return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
+ }
+}
+
+/*
+ * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
+ */
+static inline unsigned long hpt_vpn(unsigned long ea,
+ unsigned long vsid, int ssize)
+{
+ unsigned long mask;
+ int s_shift = segment_shift(ssize);
+
+ mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
+ return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
+}
+
+/*
+ * This hashes a virtual address
+ */
+static inline unsigned long hpt_hash(unsigned long vpn,
+ unsigned int shift, int ssize)
+{
+ unsigned long mask;
+ unsigned long hash, vsid;
+
+	/* VPN_SHIFT can be at most 12 */
+ if (ssize == MMU_SEGSIZE_256M) {
+ mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
+ hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
+ ((vpn & mask) >> (shift - VPN_SHIFT));
+ } else {
+ mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
+ vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
+ hash = vsid ^ (vsid << 25) ^
+ ((vpn & mask) >> (shift - VPN_SHIFT)) ;
+ }
+ return hash & 0x7fffffffffUL;
+}
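+
+/*
+ * Editor's illustration (hypothetical helper): for the first 4K page of a
+ * 256M segment the in-segment term is zero, so the hash above reduces to
+ * the (masked) VSID itself. SID_SHIFT (28) comes from the page headers.
+ */
+static inline unsigned long hpt_hash_example(unsigned long vsid)
+{
+	unsigned long vpn = vsid << (SID_SHIFT - VPN_SHIFT);	/* segment offset 0 */
+
+	return hpt_hash(vpn, 12, MMU_SEGSIZE_256M);	/* == vsid & 0x7fffffffffUL */
+}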
+
+#define HPTE_LOCAL_UPDATE 0x1
+#define HPTE_NOHPTE_UPDATE 0x2
+#define HPTE_USE_KERNEL_KEY 0x4
+
+long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
+			   unsigned long rflags, unsigned long vflags, int psize, int ssize);
+extern int __hash_page_4K(unsigned long ea, unsigned long access,
+ unsigned long vsid, pte_t *ptep, unsigned long trap,
+ unsigned long flags, int ssize, int subpage_prot);
+extern int __hash_page_64K(unsigned long ea, unsigned long access,
+ unsigned long vsid, pte_t *ptep, unsigned long trap,
+ unsigned long flags, int ssize);
+struct mm_struct;
+unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
+extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap,
+ unsigned long flags);
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+ unsigned long dsisr);
+void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
+int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
+int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+ pte_t *ptep, unsigned long trap, unsigned long flags,
+ int ssize, unsigned int shift, unsigned int mmu_psize);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __hash_page_thp(unsigned long ea, unsigned long access,
+ unsigned long vsid, pmd_t *pmdp, unsigned long trap,
+ unsigned long flags, int ssize, unsigned int psize);
+#else
+static inline int __hash_page_thp(unsigned long ea, unsigned long access,
+ unsigned long vsid, pmd_t *pmdp,
+ unsigned long trap, unsigned long flags,
+ int ssize, unsigned int psize)
+{
+ BUG();
+ return -1;
+}
+#endif
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+ unsigned long vsid, unsigned long trap,
+ int ssize, int psize, int lpsize,
+ unsigned long pte);
+extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+ unsigned long pstart, unsigned long prot,
+ int psize, int ssize);
+int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ int psize, int ssize);
+extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
+extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+
+extern void hash__setup_new_exec(void);
+
+#ifdef CONFIG_PPC_PSERIES
+void hpte_init_pseries(void);
+#else
+static inline void hpte_init_pseries(void) { }
+#endif
+
+extern void hpte_init_native(void);
+
+struct slb_entry {
+ u64 esid;
+ u64 vsid;
+};
+
+extern void slb_initialize(void);
+void slb_flush_and_restore_bolted(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
+void slb_save_contents(struct slb_entry *slb_ptr);
+void slb_dump_contents(struct slb_entry *slb_ptr);
+
+extern void slb_vmalloc_update(void);
+void preload_new_slb_context(unsigned long start, unsigned long sp);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void slb_set_size(u16 size);
+#else
+static inline void slb_set_size(u16 size) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * VSID allocation (256MB segment)
+ *
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
+ *
+ * For user processes the max context id is limited to MAX_USER_CONTEXT;
+ * more details in get_user_context().
+ *
+ * For kernel space, see get_kernel_context().
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ * VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
+ *
+ * We use VSID 0 to indicate an invalid VSID. That means we can't use context id
+ * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
+ * will produce a VSID of 0.
+ *
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble.
+ */
+
+/*
+ * The max VA bits we support as of now is 68 bits. We want a 19-bit
+ * context ID.
+ * Restrictions:
+ * The GPU cannot access beyond 128TB
+ * (47-bit effective address). We also cannot do more than a 20-bit PID.
+ * For p4 and p5, which can only do a 65-bit VA, we restrict our CONTEXT_BITS
+ * to 16 bits (i.e., we can only have 2^16 PIDs at the same time).
+ */
+#define VA_BITS 68
+#define CONTEXT_BITS 19
+#define ESID_BITS (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
+#define ESID_BITS_1T (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
+
+#define ESID_BITS_MASK ((1 << ESID_BITS) - 1)
+#define ESID_BITS_1T_MASK ((1 << ESID_BITS_1T) - 1)
+
+/*
+ * Certain configs now support MAX_PHYSMEM of more than 512TB. Hence we will
+ * need to use more than one context for the linear mapping of the kernel.
+ * For vmalloc and memmap, we use just one context with 512TB. With a 64-byte
+ * struct page size, we need only 32TB of memmap for 2PB (51 bits, MAX_PHYSMEM_BITS).
+ */
+#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
+#define MAX_KERNEL_CTX_CNT (1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
+#else
+#define MAX_KERNEL_CTX_CNT 1
+#endif
+
+#define MAX_VMALLOC_CTX_CNT 1
+#define MAX_IO_CTX_CNT 1
+#define MAX_VMEMMAP_CTX_CNT 1
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
+ * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^49 bytes (512TB).
+ *
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble.
+ *
+ */
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
+#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
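+
+/*
+ * E.g. with MAX_KERNEL_CTX_CNT = 4 as in the example above, this works out
+ * to 4 + 1 + 1 + 1 + 2 = 9, so user contexts would be allocated starting
+ * from id 9 (illustrative arithmetic only).
+ */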
+
+/*
+ * For platforms that only support 65 bit VA, we limit the context bits.
+ */
+#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
+
+/*
+ * This should be computed such that protovsid * vsid_multiplier
+ * doesn't overflow 64 bits. The vsid_multiplier should also be
+ * co-prime to vsid_modulus. We also need to make sure that the number
+ * of bits in the multiplied result (dividend) is less than twice the number of
+ * protovsid bits for our modulus optimization to work.
+ *
+ * The below table shows the current values used.
+ * |-------+------------+----------------------+------------+--------------------|
+ * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 1T    | 24         | 25                   | 49         | 50                 |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 256MB | 24         | 37                   | 61         | 74                 |
+ * |-------+------------+----------------------+------------+--------------------|
+ *
+ * |-------+------------+----------------------+------------+--------------------|
+ * | | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 1T | 24 | 28 | 52 | 56 |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 256MB | 24 | 40 | 64 | 80 |
+ * |-------+------------+----------------------+------------+--------------------|
+ *
+ */
+#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
+#define VSID_BITS_256M (VA_BITS - SID_SHIFT)
+#define VSID_BITS_65_256M (65 - SID_SHIFT)
+/*
+ * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
+ */
+#define VSID_MULINV_256M ASM_CONST(665548017062)
+
+#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
+#define VSID_BITS_1T (VA_BITS - SID_SHIFT_1T)
+#define VSID_BITS_65_1T (65 - SID_SHIFT_1T)
+#define VSID_MULINV_1T ASM_CONST(209034062)
+
+/* 1TB VSID reserved for VRMA */
+#define VRMA_VSID 0x1ffffffUL
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
+
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
+#define LOW_SLICE_ARRAY_SZ (BITS_PER_LONG / BITS_PER_BYTE)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->hash_context->slb_addr_limit >> 41)
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+ unsigned long maxaddr; /* only addresses < this are protected */
+ unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
+ unsigned int *low_prot[4];
+};
+
+#define SBP_L1_BITS (PAGE_SHIFT - 2)
+#define SBP_L2_BITS (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
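+
+/*
+ * Illustrative arithmetic, assuming 64K pages (PAGE_SHIFT = 16): each 4-byte
+ * protection word covers one 64K page, so a page of words (SBP_L1_COUNT =
+ * 2^14 of them) covers 2^14 * 64K = 1GB, and a page of pointers (2^13 of
+ * them, 8 bytes each) covers 2^13 * 1GB = 8TB, as described above.
+ */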
+
+extern void subpage_prot_free(struct mm_struct *mm);
+#else
+static inline void subpage_prot_free(struct mm_struct *mm) {}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * up to the 4G range. That gets us 16 low slices. For the rest we track slices
+ * in 1TB units.
+ */
+struct slice_mask {
+ u64 low_slices;
+ DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
+
+struct hash_mm_context {
+ u16 user_psize; /* page size index */
+
+	/* SLB page size encodings */
+ unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
+ unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+ unsigned long slb_addr_limit;
+#ifdef CONFIG_PPC_64K_PAGES
+ struct slice_mask mask_64k;
+#endif
+ struct slice_mask mask_4k;
+#ifdef CONFIG_HUGETLB_PAGE
+ struct slice_mask mask_16m;
+ struct slice_mask mask_16g;
+#endif
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+ struct subpage_prot_table *spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+};
+
+#if 0
+/*
+ * The code below is equivalent to this function for arguments
+ * < 2^VSID_BITS, which is all this should ever be called
+ * with. However gcc is not clever enough to compute the
+ * modulus (2^n-1) without a second multiply.
+ */
+#define vsid_scramble(protovsid, size) \
+ ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
+
+/* simplified form avoiding mod operation */
+#define vsid_scramble(protovsid, size) \
+ ({ \
+ unsigned long x; \
+ x = (protovsid) * VSID_MULTIPLIER_##size; \
+ x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
+ (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
+ })
+
+#else /* 1 */
+static inline unsigned long vsid_scramble(unsigned long protovsid,
+ unsigned long vsid_multiplier, int vsid_bits)
+{
+ unsigned long vsid;
+ unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
+ /*
+	 * We have the same multiplier for both 256M and 1T segments now.
+ */
+ vsid = protovsid * vsid_multiplier;
+ vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
+ return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
+}
+
+#endif /* 1 */
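+
+/*
+ * Worked example of the modulus trick above (illustrative numbers only):
+ * with vsid_bits = 4 the modulus is 15. For protovsid * multiplier = 35,
+ * (35 >> 4) + (35 & 15) = 2 + 3 = 5, which matches 35 % 15 = 5. For a
+ * product of exactly 15 the intermediate sum is 15 itself, and the final
+ * correction (15 + (16 >> 4)) & 15 = 0 matches 15 % 15 = 0.
+ */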
+
+/* Returns the segment size indicator for a user address */
+static inline int user_segment_size(unsigned long addr)
+{
+ /* Use 1T segments if possible for addresses >= 1T */
+ if (addr >= (1UL << SID_SHIFT_1T))
+ return mmu_highuser_ssize;
+ return MMU_SEGSIZE_256M;
+}
+
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
+ int ssize)
+{
+ unsigned long va_bits = VA_BITS;
+ unsigned long vsid_bits;
+ unsigned long protovsid;
+
+ /*
+ * Bad address. We return VSID 0 for that
+ */
+ if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
+ return 0;
+
+ if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+ va_bits = 65;
+
+ if (ssize == MMU_SEGSIZE_256M) {
+ vsid_bits = va_bits - SID_SHIFT;
+ protovsid = (context << ESID_BITS) |
+ ((ea >> SID_SHIFT) & ESID_BITS_MASK);
+ return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
+ }
+ /* 1T segment */
+ vsid_bits = va_bits - SID_SHIFT_1T;
+ protovsid = (context << ESID_BITS_1T) |
+ ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
+ return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
+}
+
+/*
+ * For kernel space, we use context ids as
+ * below. Range is 512TB per context.
+ *
+ * 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff]
+ * 0x00002 - [ 0xc002000000000000 - 0xc003ffffffffffff]
+ * 0x00003 - [ 0xc004000000000000 - 0xc005ffffffffffff]
+ * 0x00004 - [ 0xc006000000000000 - 0xc007ffffffffffff]
+ *
+ * vmalloc, IO, vmemmap
+ *
+ * 0x00005 - [ 0xc008000000000000 - 0xc009ffffffffffff]
+ * 0x00006 - [ 0xc00a000000000000 - 0xc00bffffffffffff]
+ * 0x00007 - [ 0xc00c000000000000 - 0xc00dffffffffffff]
+ *
+ */
+static inline unsigned long get_kernel_context(unsigned long ea)
+{
+ unsigned long region_id = get_region_id(ea);
+ unsigned long ctx;
+ /*
+	 * Depending on the kernel config, the kernel region can have one
+	 * context or more.
+ */
+ if (region_id == LINEAR_MAP_REGION_ID) {
+ /*
+ * We already verified ea to be not beyond the addr limit.
+ */
+ ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
+ } else
+ ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
+ return ctx;
+}
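+
+/*
+ * Worked example (assuming MAX_EA_BITS_PER_CONTEXT = 49, i.e. 512TB per
+ * context): for ea = 0xc002000000000000 in the linear map region,
+ * (ea & EA_MASK) >> 49 = 1, so ctx = 2, matching the table above. For the
+ * other kernel regions the context is derived from the region id instead.
+ */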
+
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ unsigned long context;
+
+ if (!is_kernel_addr(ea))
+ return 0;
+
+ context = get_kernel_context(ea);
+ return get_vsid(context, ea, ssize);
+}
+
+unsigned htab_shift_for_mem_size(unsigned long mem_size);
+
+enum slb_index {
+ LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
+ KSTACK_INDEX = 1, /* Kernel stack map */
+};
+
+#define slb_esid_mask(ssize) \
+ (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
+
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+ enum slb_index index)
+{
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
+}
+
+static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
+ unsigned long flags)
+{
+ return (vsid << slb_vsid_shift(ssize)) | flags |
+ ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
+}
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+ unsigned long flags)
+{
+ return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
+}
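+
+/*
+ * These two helpers produce the RS (mk_vsid_data) and RB (mk_esid_data)
+ * operands of an slbmte instruction. An illustrative sketch of how a
+ * kernel SLB entry is typically written:
+ *
+ *	asm volatile("slbmte %0,%1"
+ *		     : : "r" (mk_vsid_data(ea, ssize, flags)),
+ *			 "r" (mk_esid_data(ea, ssize, index))
+ *		     : "memory");
+ */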
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
new file mode 100644
index 000000000..570a4960c
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_H_
+
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * Page size definition
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * sllp : is a bit mask with the value of SLB L || LP to be or'ed
+ * directly to a slbmte "vsid" value
+ * penc : is the HPTE encoding mask for the "LP" field:
+ *
+ */
+struct mmu_psize_def {
+ unsigned int shift; /* number of bits */
+ int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
+ unsigned int tlbiel; /* tlbiel supported for that page size */
+ unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
+ union {
+ unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
+ unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
+ };
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+#endif /* __ASSEMBLY__ */
+
+/* 64-bit classic hash table MMU */
+#include <asm/book3s/64/mmu-hash.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * ISA 3.0 partition and process table entry format
+ */
+struct prtb_entry {
+ __be64 prtb0;
+ __be64 prtb1;
+};
+extern struct prtb_entry *process_tb;
+
+struct patb_entry {
+ __be64 patb0;
+ __be64 patb1;
+};
+extern struct patb_entry *partition_tb;
+
+/* Bits in patb0 field */
+#define PATB_HR (1UL << 63)
+#define RPDB_MASK 0x0fffffffffffff00UL
+#define RPDB_SHIFT (1UL << 8)
+#define RTS1_SHIFT 61 /* top 2 bits of radix tree size */
+#define RTS1_MASK (3UL << RTS1_SHIFT)
+#define RTS2_SHIFT 5 /* bottom 3 bits of radix tree size */
+#define RTS2_MASK (7UL << RTS2_SHIFT)
+#define RPDS_MASK 0x1f /* root page dir. size field */
+
+/* Bits in patb1 field */
+#define PATB_GR (1UL << 63) /* guest uses radix; must match HR */
+#define PRTS_MASK 0x1f /* process table size field */
+#define PRTB_MASK 0x0ffffffffffff000UL
+
+/* Number of supported LPID bits */
+extern unsigned int mmu_lpid_bits;
+
+/* Number of supported PID bits */
+extern unsigned int mmu_pid_bits;
+
+/* Base PID to allocate from */
+extern unsigned int mmu_base_pid;
+
+/*
+ * memory block size used with radix translation.
+ */
+extern unsigned long __ro_after_init radix_mem_block_size;
+
+#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
+#define PRTB_ENTRIES (1ul << mmu_pid_bits)
+
+#define PATB_SIZE_SHIFT (mmu_lpid_bits + 4)
+#define PATB_ENTRIES (1ul << mmu_lpid_bits)
+
+typedef unsigned long mm_context_id_t;
+struct spinlock;
+
+/* Maximum possible number of NPUs in a system. */
+#define NV_MAX_NPUS 8
+
+typedef struct {
+ union {
+ /*
+ * We use id as the PIDR content for radix. On hash we can use
+ * more than one id. The extended ids are used when we start
+		 * having addresses above 512TB. We allocate one extended id
+		 * for each 512TB. The new id is then used with the 49 bit
+		 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
+		 * from the EA and the new context id to build the new VA.
+ */
+ mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
+ };
+
+ /* Number of bits in the mm_cpumask */
+ atomic_t active_cpus;
+
+ /* Number of users of the external (Nest) MMU */
+ atomic_t copros;
+
+ /* Number of user space windows opened in process mm_context */
+ atomic_t vas_windows;
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ struct hash_mm_context *hash_context;
+#endif
+
+ void __user *vdso;
+ /*
+ * pagetable fragment support
+ */
+ void *pte_frag;
+ void *pmd_frag;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ struct list_head iommu_group_mem_list;
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ /*
+ * Each bit represents one protection key.
+ * bit set -> key allocated
+ * bit unset -> key available for allocation
+ */
+ u32 pkey_allocation_map;
+ s16 execute_only_pkey; /* key holding execute-only protection */
+#endif
+} mm_context_t;
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+ return ctx->hash_context->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+ ctx->hash_context->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+ return ctx->hash_context->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+ return ctx->hash_context->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+ return ctx->hash_context->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+ ctx->hash_context->slb_addr_limit = limit;
+}
+
+static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+ if (psize == MMU_PAGE_64K)
+ return &ctx->hash_context->mask_64k;
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ if (psize == MMU_PAGE_16M)
+ return &ctx->hash_context->mask_16m;
+ if (psize == MMU_PAGE_16G)
+ return &ctx->hash_context->mask_16g;
+#endif
+ BUG_ON(psize != MMU_PAGE_4K);
+
+ return &ctx->hash_context->mask_4k;
+}
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
+{
+ return ctx->hash_context->spt;
+}
+#endif
+
+/*
+ * The current system page and segment sizes
+ */
+extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
+
+/* MMU initialization */
+void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
+#ifdef CONFIG_PPC_PKEY
+void pkey_early_init_devtree(void);
+#else
+static inline void pkey_early_init_devtree(void) {}
+#endif
+
+extern void hash__early_init_mmu(void);
+extern void radix__early_init_mmu(void);
+static inline void __init early_init_mmu(void)
+{
+ if (radix_enabled())
+ return radix__early_init_mmu();
+ return hash__early_init_mmu();
+}
+extern void hash__early_init_mmu_secondary(void);
+extern void radix__early_init_mmu_secondary(void);
+static inline void early_init_mmu_secondary(void)
+{
+ if (radix_enabled())
+ return radix__early_init_mmu_secondary();
+ return hash__early_init_mmu_secondary();
+}
+
+extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size);
+static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size)
+{
+ /*
+	 * Hash has stricter restrictions. At this point we don't
+	 * know which translation mode we will pick. Hence go with the hash
+ * restrictions.
+ */
+ if (!early_radix_enabled())
+ hash__setup_initial_memory_limit(first_memblock_base,
+ first_memblock_size);
+}
+
+#ifdef CONFIG_PPC_PSERIES
+void __init radix_init_pseries(void);
+#else
+static inline void radix_init_pseries(void) { }
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm) \
+ do { \
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
+ atomic_dec(&(mm)->context.active_cpus); \
+ cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
+ } \
+ } while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
+{
+ int index = ea >> MAX_EA_BITS_PER_CONTEXT;
+
+ if (likely(index < ARRAY_SIZE(ctx->extended_id)))
+ return ctx->extended_id[index];
+
+ /* should never happen */
+ WARN_ON(1);
+ return 0;
+}
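+
+/*
+ * For example (assuming MAX_EA_BITS_PER_CONTEXT = 49): a user ea of
+ * 1UL << 49, just above 512TB, has index 1 and so uses extended_id[1];
+ * extended_id[0] aliases the primary id for the first 512TB of EA.
+ */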
+
+static inline unsigned long get_user_vsid(mm_context_t *ctx,
+ unsigned long ea, int ssize)
+{
+ unsigned long context = get_user_context(ctx, ea);
+
+ return get_vsid(context, ea, ssize);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
new file mode 100644
index 000000000..dd2cff53a
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
+
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
+#include <linux/percpu.h>
+
+struct vmemmap_backing {
+ struct vmemmap_backing *list;
+ unsigned long phys;
+ unsigned long virt_addr;
+};
+extern struct vmemmap_backing *vmemmap_list;
+
+extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
+extern void pmd_fragment_free(unsigned long *);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+extern void __tlb_remove_table(void *_table);
+void pte_frag_destroy(void *pte_frag);
+
+static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+ return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
+#else
+ struct page *page;
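+	/*
+	 * With 4K pages the radix PGD has 2^13 entries of 8 bytes each,
+	 * i.e. 64K, hence the order-4 page allocation below.
+	 */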
+ page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
+ 4);
+ if (!page)
+ return NULL;
+ return (pgd_t *) page_address(page);
+#endif
+}
+
+static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+ free_page((unsigned long)pgd);
+#else
+ free_pages((unsigned long)pgd, 4);
+#endif
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ if (radix_enabled())
+ return radix__pgd_alloc(mm);
+
+ pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ if (unlikely(!pgd))
+ return pgd;
+
+ /*
+ * Don't scan the PGD for pointers, it contains references to PUDs but
+ * those references are not full pointers and so can't be recognised by
+ * kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+
+ /*
+ * With hugetlb, we don't clear the second half of the page table.
+ * If we share the same slab cache with the pmd or pud level table,
+ * we need to make sure we zero out the full table on alloc.
+ * With 4K we don't store slot in the second half. Hence we don't
+ * need to do this for 4k.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
+ (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
+ memset(pgd, 0, PGD_TABLE_SIZE);
+#endif
+ return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ if (radix_enabled())
+ return radix__pgd_free(mm, pgd);
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
+{
+ *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD, that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
+}
+
+static inline void __pud_free(pud_t *pud)
+{
+ struct page *page = virt_to_page(pud);
+
+ /*
+	 * Early pud pages allocated via the memblock allocator
+	 * can't be directly freed to slab. KFENCE pages have
+	 * both reserved and slab flags set, so they need to be
+	 * freed with kmem_cache_free().
+ */
+ if (PageReserved(page) && !PageSlab(page))
+ free_reserved_page(page);
+ else
+ kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ return __pud_free(pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ *pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+}
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+{
+ pgtable_free_tlb(tlb, pud, PUD_INDEX);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return pmd_fragment_alloc(mm, addr);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ pmd_fragment_free((unsigned long *)pmd);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
+{
+ return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ *pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
+{
+ *pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ pgtable_free_tlb(tlb, table, PTE_INDEX);
+}
+
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[psize]);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
new file mode 100644
index 000000000..48f21820a
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H
+/*
+ * hash 4k can't share hugetlb page tables and also doesn't support THP
+ */
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int pmd_huge(pmd_t pmd)
+{
+ /*
+ * leaf pte for huge page
+ */
+ if (radix_enabled())
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+ return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ /*
+ * leaf pte for huge page
+ */
+ if (radix_enabled())
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+ return 0;
+}
+
+/*
+ * With radix, we have hugepage ptes in the pud and pmd entries. We don't
+ * need to set up a hugepage directory for them. Our pte and page directory
+ * formats enable this directly.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ if (radix_enabled())
+ return 0;
+ return hash__hugepd_ok(hpd);
+}
+#define is_hugepd(hpd) (hugepd_ok(hpd))
+
+/*
+ * 16M and 16G huge page directory tables are allocated from a slab cache.
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX \
+ (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+ switch (index) {
+ case H_16M_CACHE_INDEX:
+ return HTLB_16M_INDEX;
+ case H_16G_CACHE_INDEX:
+ return HTLB_16G_INDEX;
+ default:
+ BUG();
+ }
+ /* should not reach */
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
new file mode 100644
index 000000000..2fce3498b
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
+ *
+ * Defined in such a way that we can optimize away code block at build time
+ * if CONFIG_HUGETLB_PAGE=n.
+ *
+ * Returns true for pmd migration entries, THP, devmap and hugetlb,
+ * but is compile-time dependent on CONFIG_HUGETLB_PAGE.
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+ /*
+ * leaf pte for huge page
+ */
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ /*
+ * leaf pte for huge page
+ */
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+/*
+ * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
+ * need to set up a hugepage directory for them. Our pte and page directory
+ * formats enable this directly.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ return 0;
+}
+
+#define is_hugepd(pdep) 0
+
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+ BUG();
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
+{
+ if (radix_enabled())
+ BUG();
+ return hash__remap_4k_pfn(vma, addr, pfn, prot);
+}
+#endif /* __ASSEMBLY__ */
+#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
new file mode 100644
index 000000000..c436d8422
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -0,0 +1,1444 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+
+#include <asm-generic/pgtable-nop4d.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#include <linux/bug.h>
+#include <linux/sizes.h>
+#endif
+
+/*
+ * Common bits between hash and Radix page table
+ */
+
+#define _PAGE_EXEC 0x00001 /* execute permission */
+#define _PAGE_WRITE 0x00002 /* write access allowed */
+#define _PAGE_READ 0x00004 /* read access allowed */
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
+#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
+#define _PAGE_SAO 0x00010 /* Strong access order */
+#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
+#define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */
+#define _PAGE_DIRTY 0x00080 /* C: page changed */
+#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
+/*
+ * Software bits
+ */
+#define _RPAGE_SW0 0x2000000000000000UL
+#define _RPAGE_SW1 0x00800
+#define _RPAGE_SW2 0x00400
+#define _RPAGE_SW3 0x00200
+#define _RPAGE_RSV1 0x00040UL
+
+#define _RPAGE_PKEY_BIT4 0x1000000000000000UL
+#define _RPAGE_PKEY_BIT3 0x0800000000000000UL
+#define _RPAGE_PKEY_BIT2 0x0400000000000000UL
+#define _RPAGE_PKEY_BIT1 0x0200000000000000UL
+#define _RPAGE_PKEY_BIT0 0x0100000000000000UL
+
+#define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */
+#define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
+/*
+ * We need to mark a pmd pte invalid while splitting. We can do that by clearing
+ * the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
+ * differentiate between the two, we use a SW field when invalidating.
+ *
+ * We do the same temporary invalidation for regular pte entries in ptep_set_access_flags().
+ *
+ * This is used only when _PAGE_PRESENT is cleared.
+ */
+#define _PAGE_INVALID _RPAGE_SW0
+
+/*
+ * Top and bottom bits of RPN which can be used by hash
+ * translation mode, because we expect them to be zero
+ * otherwise.
+ */
+#define _RPAGE_RPN0 0x01000
+#define _RPAGE_RPN1 0x02000
+#define _RPAGE_RPN43 0x0080000000000000UL
+#define _RPAGE_RPN42 0x0040000000000000UL
+#define _RPAGE_RPN41 0x0020000000000000UL
+
+/* Max physical address bit as per radix table */
+#define _RPAGE_PA_MAX 56
+
+/*
+ * Max physical address bit we will use for now.
+ *
+ * This is mostly a hardware limitation and for now Power9 has
+ * a 51 bit limit.
+ *
+ * This is different from the number of physical bit required to address
+ * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
+ * MAX_PHYSMEM_BITS is a linux limitation imposed by the maximum
+ * number of sections we can support (SECTIONS_SHIFT).
+ *
+ * This is different from Radix page table limitation above and
+ * should always be less than that. The limit is done such that
+ * we can overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX
+ * for hash linux page table specific bits.
+ *
+ * In order to be compatible with future hardware generations we keep
+ * some offsets and limit this for now to 53
+ */
+#define _PAGE_PA_MAX 53
+
+#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
+#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
+#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
+
+/*
+ * Drivers request cache inhibited pte mappings using _PAGE_NO_CACHE.
+ * Instead of fixing all of them, add an alternate define which
+ * maps to the CI pte mapping.
+ */
+#define _PAGE_NO_CACHE _PAGE_TOLERANT
+/*
+ * We support a _RPAGE_PA_MAX bit real address in the pte. On the linux side
+ * we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
+ * and everything below PAGE_SHIFT.
+ */
+#define PTE_RPN_MASK (((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
+/*
+ * set of bits not changed in pmd_modify. Even though we have hash specific bits
+ * in here, on radix we expect them to be zero.
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+/*
+ * user access blocked by key
+ */
+#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
+#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ */
+#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
+#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+#ifndef __ASSEMBLY__
+/*
+ * page table defines
+ */
+extern unsigned long __pte_index_size;
+extern unsigned long __pmd_index_size;
+extern unsigned long __pud_index_size;
+extern unsigned long __pgd_index_size;
+extern unsigned long __pud_cache_index;
+#define PTE_INDEX_SIZE __pte_index_size
+#define PMD_INDEX_SIZE __pmd_index_size
+#define PUD_INDEX_SIZE __pud_index_size
+#define PGD_INDEX_SIZE __pgd_index_size
+/* pmd table use page table fragments */
+#define PMD_CACHE_INDEX 0
+#define PUD_CACHE_INDEX __pud_cache_index
+/*
+ * Because of the use of pte fragments and THP, the size of the page tables
+ * is not always derived from the index sizes above.
+ */
+extern unsigned long __pte_table_size;
+extern unsigned long __pmd_table_size;
+extern unsigned long __pud_table_size;
+extern unsigned long __pgd_table_size;
+#define PTE_TABLE_SIZE __pte_table_size
+#define PMD_TABLE_SIZE __pmd_table_size
+#define PUD_TABLE_SIZE __pud_table_size
+#define PGD_TABLE_SIZE __pgd_table_size
+
+extern unsigned long __pmd_val_bits;
+extern unsigned long __pud_val_bits;
+extern unsigned long __pgd_val_bits;
+#define PMD_VAL_BITS __pmd_val_bits
+#define PUD_VAL_BITS __pud_val_bits
+#define PGD_VAL_BITS __pgd_val_bits
+
+extern unsigned long __pte_frag_nr;
+#define PTE_FRAG_NR __pte_frag_nr
+extern unsigned long __pte_frag_size_shift;
+#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
+extern unsigned long __pmd_frag_nr;
+#define PMD_FRAG_NR __pmd_frag_nr
+extern unsigned long __pmd_frag_size_shift;
+#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift
+#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT)
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+#define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
+#define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
+#define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0xc0000000000000ffUL
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0xc0000000000000ffUL
+/* Bits to mask out from a PGD to get to the PUD page */
+#define P4D_MASKED_BITS 0xc0000000000000ffUL
+
+/*
+ * Used as an indicator for rcu callback functions
+ */
+enum pgtable_index {
+ PTE_INDEX = 0,
+ PMD_INDEX,
+ PUD_INDEX,
+ PGD_INDEX,
+ /*
+ * Below are used with 4k page size and hugetlb
+ */
+ HTLB_16M_INDEX,
+ HTLB_16G_INDEX,
+};
+
+extern unsigned long __vmalloc_start;
+extern unsigned long __vmalloc_end;
+#define VMALLOC_START __vmalloc_start
+#define VMALLOC_END __vmalloc_end
+
+static inline unsigned int ioremap_max_order(void)
+{
+ if (radix_enabled())
+ return PUD_SHIFT;
+ return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ioremap_max_order()
+
+extern unsigned long __kernel_virt_start;
+extern unsigned long __kernel_io_start;
+extern unsigned long __kernel_io_end;
+#define KERN_VIRT_START __kernel_virt_start
+#define KERN_IO_START __kernel_io_start
+#define KERN_IO_END __kernel_io_end
+
+extern struct page *vmemmap;
+extern unsigned long pci_io_base;
+#endif /* __ASSEMBLY__ */
+
+#include <asm/book3s/64/hash.h>
+#include <asm/book3s/64/radix.h>
+
+#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
+#define MAX_PHYSMEM_BITS H_MAX_PHYSMEM_BITS
+#else
+#define MAX_PHYSMEM_BITS R_MAX_PHYSMEM_BITS
+#endif
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/pgtable-64k.h>
+#else
+#include <asm/book3s/64/pgtable-4k.h>
+#endif
+
+#include <asm/barrier.h>
+/*
+ * IO space itself is carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ * ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define FULL_IO_SIZE 0x80000000ul
+#define ISA_IO_BASE (KERN_IO_START)
+#define ISA_IO_END (KERN_IO_START + 0x10000ul)
+#define PHB_IO_BASE (ISA_IO_END)
+#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_START (ioremap_bot)
+#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This is the default implementation of various PTE accessors; it's
+ * used in all cases except Book3S with 64K pages, where we have a
+ * concept of sub-pages.
+ */
+#ifndef __real_pte
+
+#define __real_pte(e, p, o) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ index = 0; \
+ shift = mmu_psize_defs[psize].shift; \
+
+#define pte_iterate_hashed_end() } while(0)
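+
+/*
+ * Typical usage pattern of the iterator above (illustrative sketch):
+ *
+ *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
+ *		... operate on one hashed subpage ...
+ *	} pte_iterate_hashed_end();
+ */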
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+
+#endif /* __real_pte */
+
+static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ unsigned long set, int huge)
+{
+ if (radix_enabled())
+ return radix__pte_update(mm, addr, ptep, clr, set, huge);
+ return hash__pte_update(mm, addr, ptep, clr, set, huge);
+}
+/*
+ * For hash even if we have _PAGE_ACCESSED = 0, we do a pte_update.
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally.
+ *
+ * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
+ * function for both hash and radix.
+ */
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ return 0;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ return (old & _PAGE_ACCESSED) != 0;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+({ \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+})
+
+/*
+ * On Book3S CPUs, clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ *
+ * Note: this optimisation also exists in pte_needs_flush() and
+ * huge_pmd_needs_flush().
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young ptep_test_and_clear_young
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#define pmdp_clear_flush_young pmdp_test_and_clear_young
+
+static inline int __pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ /*
+	 * Saved write ptes are prot none ptes that don't have the
+	 * privileged bit set. We mark prot none as one which has
+	 * present and privileged bits set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set we clear
+ * the privileged bit.
+ */
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+ return __pte_write(pte) || pte_savedwrite(pte);
+}
+
+static inline int pte_read(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ /*
+	 * We should not find protnone for hugetlb, but this completes the
+ * interface.
+ */
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+ return __pte(old);
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, int full)
+{
+ if (full && radix_enabled()) {
+ /*
+ * We know that this is a full mm pte clear and
+ * hence can be sure there is no parallel set_pte.
+ */
+ return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+ }
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t * ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
+}
+
+static inline bool pte_exec(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+#define pte_mk_savedwrite pte_mk_savedwrite
+static inline pte_t pte_mk_savedwrite(pte_t pte)
+{
+ /*
+ * Used by Autonuma subsystem to preserve the write bit
+ * while marking the pte PROT_NONE. Only allow this
+	 * on a PROT_NONE pte.
+ */
+ VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
+}
+
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+ /*
+ * Used by KSM subsystem to make a protnone pte readonly.
+ */
+ VM_BUG_ON(!pte_protnone(pte));
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+ VM_WARN_ON(1);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+static inline int pte_present(pte_t pte)
+{
+ /*
+	 * A pte is considered present if _PAGE_PRESENT is set.
+ * We also need to consider the pte present which is marked
+ * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
+ * if we find _PAGE_PRESENT cleared.
+ */
+
+ if (pte_hw_valid(pte))
+ return true;
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
+}
+
+#ifdef CONFIG_PPC_MEM_KEYS
+extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
+#else
+static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
+{
+ return true;
+}
+#endif /* CONFIG_PPC_MEM_KEYS */
+
+static inline bool pte_user(pte_t pte)
+{
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ /*
+ * _PAGE_READ is needed for any access and will be
+ * cleared for PROT_NONE
+ */
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ return false;
+
+ if (write && !pte_write(pte))
+ return false;
+
+ return arch_pte_access_permitted(pte_val(pte), write, 0);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
+ VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
+
+ return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ if (unlikely(pte_savedwrite(pte)))
+ return pte_clear_savedwrite(pte);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ /*
+ * write implies read, hence set both
+ */
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
+}
+
+/*
+ * This is potentially called with a pmd as the argument, in which case it's not
+ * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
+ * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
+ * use in page directory entries (ie. non-ptes).
+ */
+static inline int pte_devmap(pte_t pte)
+{
+ u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+
+ return (pte_raw(pte) & mask) == mask;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	/* FIXME!! check whether this needs to be conditional */
+ return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
+ cpu_to_be64(pgprot_val(newprot)));
+}
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+ /* \
+ * Don't have overlapping bits with _PAGE_HPTEFLAGS \
+ * We filter HPTEFLAGS on set_pte. \
+ */ \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE); \
+ } while (0)
+
+#define SWP_TYPE_BITS 5
+#define SWP_TYPE_MASK ((1UL << SWP_TYPE_BITS) - 1)
+#define __swp_type(x) ((x).val & SWP_TYPE_MASK)
+#define __swp_offset(x) (((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ (type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
+/*
+ * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
+ * the swap type and offset we get from swap and convert that to a pte to find
+ * a matching pte in the linux page table.
+ * Clear bits not found in swap entries here.
+ */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
+#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE)
+#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
+#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
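+
+/*
+ * Illustrative packing example (assuming 64K pages, PAGE_SHIFT = 16):
+ * __swp_entry(3, 0x10) yields val = 3 | (0x10 << 16) = 0x100003. The type
+ * lives in the low SWP_TYPE_BITS and the offset re-uses the RPN field, so
+ * neither collides with _PAGE_PTE or the bits filtered on set_pte.
+ */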
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_SOFT_DIRTY
+#else
+#define _PAGE_SWP_SOFT_DIRTY 0UL
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
+#define _PAGE_SWP_EXCLUSIVE _PAGE_NON_IDEMPOTENT
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
+}
+
+static inline bool pte_swp_soft_dirty(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline bool check_pte_access(unsigned long access, unsigned long ptev)
+{
+ /*
+	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits.
+ */
+ if (access & ~ptev)
+ return false;
+ /*
+	 * This checks for access to privileged space.
+ */
+ if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
+ return false;
+
+ return true;
+}
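+
+/*
+ * E.g. (illustrative): a read fault builds access = _PAGE_PRESENT |
+ * _PAGE_READ; a pte with only _PAGE_EXEC set fails the first test because
+ * _PAGE_READ is missing from ptev, and a user access to a _PAGE_PRIVILEGED
+ * pte fails the second test.
+ */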
+/*
+ * Generic functions with hash/radix callbacks
+ */
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ if (radix_enabled())
+ return radix__ptep_set_access_flags(vma, ptep, entry,
+ address, psize);
+ return hash__ptep_set_access_flags(ptep, entry);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ if (radix_enabled())
+ return radix__pte_same(pte_a, pte_b);
+ return hash__pte_same(pte_a, pte_b);
+}
+
+static inline int pte_none(pte_t pte)
+{
+ if (radix_enabled())
+ return radix__pte_none(pte);
+ return hash__pte_none(pte);
+}
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+ VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
+ /*
+ * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
+ * in all the callers.
+ */
+ pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+
+ if (radix_enabled())
+ return radix__set_pte_at(mm, addr, ptep, pte, percpu);
+ return hash__set_pte_at(mm, addr, ptep, pte, percpu);
+}
+
+#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_NON_IDEMPOTENT);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+ _PAGE_TOLERANT);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+ return pgprot_noncached_wc(prot);
+}
+/*
+ * Check whether a pte mapping has the cache inhibited property.
+ */
+static inline bool pte_ci(pte_t pte)
+{
+ __be64 pte_v = pte_raw(pte);
+
+ if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
+ ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
+ return true;
+ return false;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+ /*
+ * Don't use this if we can possibly have a hash page table
+ * entry mapping this.
+ */
+ WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+ }
+ *pmdp = __pmd(0);
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return !pmd_raw(pmd);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+	 * A pmd is considered present if _PAGE_PRESENT is set.
+ * We also need to consider the pmd present which is marked
+ * invalid during a split. Hence we look for _PAGE_INVALID
+ * if we find _PAGE_PRESENT cleared.
+ */
+ if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
+ return true;
+
+ return false;
+}
+
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+ /*
+ * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+ * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+ *
+	 * This condition may also occur transiently while a pmd is being
+	 * modified in place (see ptep_modify_prot_start), so callers must ensure this
+ * case is fine as well.
+ */
+ if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+ cpu_to_be64(_PAGE_INVALID))
+ return true;
+
+ return false;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ if (radix_enabled())
+ return radix__pmd_bad(pmd);
+ return hash__pmd_bad(pmd);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+ /*
+ * Don't use this if we can possibly have a hash page table
+ * entry mapping this.
+ */
+ WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+ }
+ *pudp = __pud(0);
+}
+
+static inline int pud_none(pud_t pud)
+{
+ return !pud_raw(pud);
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
+}
+
+extern struct page *pud_page(pud_t pud);
+extern struct page *pmd_page(pmd_t pmd);
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte_raw(pud_raw(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud_raw(pte_raw(pte));
+}
+#define pud_write(pud) pte_write(pud_pte(pud))
+
+static inline int pud_bad(pud_t pud)
+{
+ if (radix_enabled())
+ return radix__pud_bad(pud);
+ return hash__pud_bad(pud);
+}
+
+#define pud_access_permitted pud_access_permitted
+static inline bool pud_access_permitted(pud_t pud, bool write)
+{
+ return pte_access_permitted(pud_pte(pud), write);
+}
+
+#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) })
+static inline __be64 p4d_raw(p4d_t x)
+{
+ return pgd_raw(x.pgd);
+}
+
+#define p4d_write(p4d) pte_write(p4d_pte(p4d))
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ *p4dp = __p4d(0);
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+ return !p4d_raw(p4d);
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
+}
+
+static inline pte_t p4d_pte(p4d_t p4d)
+{
+ return __pte_raw(p4d_raw(p4d));
+}
+
+static inline p4d_t pte_p4d(pte_t pte)
+{
+ return __p4d_raw(pte_raw(pte));
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ if (radix_enabled())
+ return radix__p4d_bad(p4d);
+ return hash__p4d_bad(p4d);
+}
+
+#define p4d_access_permitted p4d_access_permitted
+static inline bool p4d_access_permitted(p4d_t p4d, bool write)
+{
+ return pte_access_permitted(p4d_pte(p4d), write);
+}
+
+extern struct page *p4d_page(p4d_t p4d);
+
+/* Pointers in the page table tree are physical addresses */
+#define __pgtable_ptr_val(ptr) __pa(ptr)
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
+}
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+{
+ if (radix_enabled()) {
+#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
+ WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
+#endif
+ return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
+ }
+ return hash__map_kernel_page(ea, pa, prot);
+}
+
+void unmap_kernel_page(unsigned long va);
+
+static inline int __meminit vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys)
+{
+ if (radix_enabled())
+ return radix__vmemmap_create_mapping(start, page_size, phys);
+ return hash__vmemmap_create_mapping(start, page_size, phys);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline void vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size)
+{
+ if (radix_enabled())
+ return radix__vmemmap_remove_mapping(start, page_size);
+ return hash__vmemmap_remove_mapping(start, page_size);
+}
+#endif
+
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (radix_enabled())
+ radix__kernel_map_pages(page, numpages, enable);
+ else
+ hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte_raw(pmd_raw(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd_raw(pte_raw(pte));
+}
+
+static inline pte_t *pmdp_ptep(pmd_t *pmd)
+{
+ return (pte_t *)pmd;
+}
+#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mk_savedwrite(pmd) pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
+#define pmd_clear_savedwrite(pmd) pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
+#define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_mksoft_dirty(pmd) pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_swp_soft_dirty(pmd) pte_swp_soft_dirty(pmd_pte(pmd))
+#define pmd_swp_clear_soft_dirty(pmd) pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
+#endif
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
+
+#define pmd_access_permitted pmd_access_permitted
+static inline bool pmd_access_permitted(pmd_t pmd, bool write)
+{
+ /*
+ * pmdp_invalidate sets this combination (which is not caught by
+ * !pte_present() check in pte_access_permitted), to prevent
+ * lock-free lookups, as part of the serialize_against_pte_lookup()
+ * synchronisation.
+ *
+ * This also catches the case where the PTE's hardware PRESENT bit is
+ * cleared while the TLB is being flushed, which is suboptimal but
+ * should not be frequent.
+ */
+ if (pmd_is_serializing(pmd))
+ return false;
+
+ return pte_access_permitted(pmd_pte(pmd), write);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
+
+extern int hash__has_transparent_hugepage(void);
+static inline int has_transparent_hugepage(void)
+{
+ if (radix_enabled())
+ return radix__has_transparent_hugepage();
+ return hash__has_transparent_hugepage();
+}
+#define has_transparent_hugepage has_transparent_hugepage
+
+static inline unsigned long
+pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
+ unsigned long clr, unsigned long set)
+{
+ if (radix_enabled())
+ return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
+ return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
+}
+
+/*
+ * Returns true for pmd migration entries, THP, devmap and hugetlb
+ * entries, but is compile-time dependent on the THP config.
+ */
+static inline int pmd_large(pmd_t pmd)
+{
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+/*
+ * For radix we should always find H_PAGE_HASHPTE zero. Hence
+ * the below will work for radix too
+ */
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ unsigned long old;
+
+ if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ return 0;
+ old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+ return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if (__pmd_write((*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ else if (unlikely(pmd_savedwrite(*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
+}
+
+/*
+ * Only returns true for a THP; false for a pmd migration entry.
+ * We also need to return true when we come across a pmd that is
+ * in the middle of a THP split. While splitting a THP, we mark the
+ * pmd invalid (pmdp_invalidate()) before we set it to the pte page
+ * address, and a pmd_trans_huge() check against the pmd entry during
+ * that window should return true.
+ * We should not call this on a hugetlb entry; check for a HugeTLB
+ * entry using vma->vm_flags instead.
+ * The page table walk rule is explained in Documentation/mm/transhuge.rst
+ */
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ if (!pmd_present(pmd))
+ return false;
+
+ if (radix_enabled())
+ return radix__pmd_trans_huge(pmd);
+ return hash__pmd_trans_huge(pmd);
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ if (radix_enabled())
+ return radix__pmd_same(pmd_a, pmd_b);
+ return hash__pmd_same(pmd_a, pmd_b);
+}
+
+static inline pmd_t __pmd_mkhuge(pmd_t pmd)
+{
+ if (radix_enabled())
+ return radix__pmd_mkhuge(pmd);
+ return hash__pmd_mkhuge(pmd);
+}
+
+/*
+ * pfn_pmd() returns a pmd_t that can be used as a huge-page pmd entry.
+ */
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+#ifdef CONFIG_DEBUG_VM
+ if (radix_enabled())
+ WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)) == 0);
+ else
+ WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)) !=
+ cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE));
+#endif
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ if (radix_enabled())
+ return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
+ return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+}
+
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ if (radix_enabled())
+ return radix__pmdp_collapse_flush(vma, address, pmdp);
+ return hash__pmdp_collapse_flush(vma, address, pmdp);
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long addr,
+ pmd_t *pmdp, int full);
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
+ pmd_t *pmdp, pgtable_t pgtable)
+{
+ if (radix_enabled())
+ return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+ return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+}
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
+ pmd_t *pmdp)
+{
+ if (radix_enabled())
+ return radix__pgtable_trans_huge_withdraw(mm, pmdp);
+ return hash__pgtable_trans_huge_withdraw(mm, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma);
+/*
+ * Hash translation mode uses the deposited table to store hash pte
+ * slot information.
+ */
+#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
+static inline bool arch_needs_pgtable_deposit(void)
+{
+ if (radix_enabled())
+ return false;
+ return true;
+}
+extern void serialize_against_pte_lookup(struct mm_struct *mm);
+
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+ if (radix_enabled())
+ return radix__pmd_mkdevmap(pmd);
+ return hash__pmd_mkdevmap(pmd);
+}
+
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return pte_devmap(pmd_pte(pmd));
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline int pud_pfn(pud_t pud)
+{
+ /*
+ * Currently all calls to pud_pfn() are gated around a pud_devmap()
+ * check so this should never be used. If it grows another user we
+ * want to know about it.
+ */
+ BUILD_BUG();
+ return 0;
+}
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+ pte_t *, pte_t, pte_t);
+
+/*
+ * Returns true for an R -> RW upgrade of a pte.
+ */
+static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
+{
+ if (!(old_val & _PAGE_READ))
+ return false;
+
+ if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
+ return true;
+
+ return false;
+}
+
+/*
+ * Like pmd_huge() and pmd_large(), but works regardless of config options
+ */
+#define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
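
The leaf helpers at the end of this header are what a software page-table walk
keys off. A minimal walk sketch (hypothetical, with locking and retry logic
omitted; a real walker must hold the relevant locks):

	/* Resolve 'ea' in 'mm' to a pfn, stopping at leaf (huge) entries. */
	static unsigned long walk_to_pfn(struct mm_struct *mm, unsigned long ea)
	{
		pgd_t *pgdp = pgd_offset(mm, ea);
		p4d_t *p4dp = p4d_offset(pgdp, ea);
		pud_t *pudp;
		pmd_t *pmdp;

		if (p4d_none(*p4dp))
			return 0;
		pudp = pud_offset(p4dp, ea);
		if (pud_none(*pudp))
			return 0;
		if (pud_is_leaf(*pudp))
			return pte_pfn(pud_pte(*pudp));
		pmdp = pmd_offset(pudp, ea);
		if (pmd_none(*pmdp))
			return 0;
		if (pmd_is_leaf(*pmdp))
			return pte_pfn(pmd_pte(*pmdp));
		return pte_pfn(*pte_offset_kernel(pmdp, ea));
	}
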
diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h
new file mode 100644
index 000000000..5b178139f
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pkeys.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_PKEYS_H
+#define _ASM_POWERPC_BOOK3S_64_PKEYS_H
+
+#include <asm/book3s/64/hash-pkey.h>
+
+static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return 0x0UL;
+
+ if (radix_enabled())
+ BUG();
+ return hash__vmflag_to_pte_pkey_bits(vm_flags);
+}
+
+static inline u16 pte_to_pkey_bits(u64 pteflags)
+{
+ if (radix_enabled())
+ BUG();
+ return hash__pte_to_pkey_bits(pteflags);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_PKEYS_H */
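
On hash, these two helpers are inverses over the pkey-related PTE bits; a
hypothetical round-trip (vma is any mapped VMA on a hash MMU):

	static u16 vma_pkey_roundtrip(struct vm_area_struct *vma)
	{
		u64 pteflags = vmflag_to_pte_pkey_bits(vma->vm_flags);

		return pte_to_pkey_bits(pteflags);	/* the VMA's pkey bits */
	}
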
diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h
new file mode 100644
index 000000000..035ceecd6
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_4K_H
+#define _ASM_POWERPC_PGTABLE_RADIX_4K_H
+
+/*
+ * For a 4K page size, the supported index sizes are 13/9/9/9.
+ */
+#define RADIX_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 4K = 2MB
+#define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
+#define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
+#define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
+
+/*
+ * One fragment per page
+ */
+#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
+#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
+#endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h
new file mode 100644
index 000000000..54e33828b
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_64K_H
+#define _ASM_POWERPC_PGTABLE_RADIX_64K_H
+
+/*
+ * For a 64K page size, the supported index sizes are 13/9/9/5.
+ */
+#define RADIX_PTE_INDEX_SIZE 5 // size: 8B << 5 = 256B, maps 2^5 x 64K = 2MB
+#define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
+#define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
+#define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
+
+/*
+ * With radix we use a 256 byte PTE page fragment:
+ * 8 bytes for each of the 32 PTE entries.
+ */
+#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
+#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
+#endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */
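
To make the fragment arithmetic concrete: with 64K pages a radix PTE table is
8B << 5 = 256 bytes, so one page yields 65536 / 256 = 256 fragments. A
hypothetical compile-time check (not in the header):

	#include <linux/build_bug.h>

	static_assert(RADIX_PTE_FRAG_SIZE_SHIFT == 8);		/* 256-byte fragments */
	static_assert(RADIX_PTE_FRAG_NR == (64 * 1024) / 256);	/* 256 per 64K page */
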
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
new file mode 100644
index 000000000..686001eda
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
+#define _ASM_POWERPC_PGTABLE_RADIX_H
+
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/cmpxchg.h>
+#endif
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/radix-64k.h>
+#else
+#include <asm/book3s/64/radix-4k.h>
+#endif
+
+#ifndef __ASSEMBLY__
+#include <asm/book3s/64/tlbflush-radix.h>
+#include <asm/cpu_has_feature.h>
+#endif
+
+/* An empty PTE can still have an R or C writeback */
+#define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
+
+/* Bits to set in a RPMD/RPUD/RPGD */
+#define RADIX_PMD_VAL_BITS (0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_VAL_BITS (0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_VAL_BITS (0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
+
+/* Don't have anything in the reserved bits and leaf bits */
+#define RADIX_PMD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_P4D_BAD_BITS 0x60000000000000e0UL
+
+#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
+
+#define R_PTRS_PER_PTE (1 << RADIX_PTE_INDEX_SIZE)
+#define R_PTRS_PER_PMD (1 << RADIX_PMD_INDEX_SIZE)
+#define R_PTRS_PER_PUD (1 << RADIX_PUD_INDEX_SIZE)
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
+ RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
+#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
+
+/*
+ * We support a 52 bit address space and use the top quadrant
+ * (top two address bits 0b11) for the kernel virtual mapping;
+ * the kernel must fit within that quadrant.
+ *
+ * +------------------+
+ * +------------------+ Kernel virtual map (0xc008000000000000)
+ * | |
+ * | |
+ * | |
+ * 0b11......+------------------+ Kernel linear map (0xc....)
+ * | |
+ * | 2 quadrant |
+ * | |
+ * 0b10......+------------------+
+ * | |
+ * | 1 quadrant |
+ * | |
+ * 0b01......+------------------+
+ * | |
+ * | 0 quadrant |
+ * | |
+ * 0b00......+------------------+
+ *
+ *
+ * 3rd quadrant expanded:
+ * +------------------------------+ Highest address (0xc010000000000000)
+ * +------------------------------+ KASAN shadow end (0xc00fc00000000000)
+ * | |
+ * | |
+ * +------------------------------+ Kernel vmemmap end/shadow start (0xc00e000000000000)
+ * | |
+ * | 512TB |
+ * | |
+ * +------------------------------+ Kernel IO map end/vmemmap start
+ * | |
+ * | 512TB |
+ * | |
+ * +------------------------------+ Kernel vmap end/ IO map start
+ * | |
+ * | 512TB |
+ * | |
+ * +------------------------------+ Kernel virt start (0xc008000000000000)
+ * | |
+ * | |
+ * | |
+ * +------------------------------+ Kernel linear (0xc.....)
+ */
+
+/* For the sizes of the shadow area, see kasan.h */
+
+/*
+ * If we store section details in page->flags we can't increase
+ * MAX_PHYSMEM_BITS: increasing SECTIONS_WIDTH means node details no
+ * longer fit in page->flags, and page_to_nid() must instead do a
+ * page->section->node lookup.
+ * Hence only increase it for VMEMMAP. Further, depending on
+ * SPARSEMEM_EXTREME, this reduces the memory requirements for a
+ * large number of sections.
+ * 51 bits is the max physical real address on POWER9.
+ */
+
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
+#define R_MAX_PHYSMEM_BITS 51
+#else
+#define R_MAX_PHYSMEM_BITS 46
+#endif
+
+#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
+/*
+ * 49 = MAX_EA_BITS_PER_CONTEXT (hash specific), chosen to make sure
+ * we pick the same value as hash.
+ */
+#define RADIX_KERN_MAP_SIZE (1UL << 49)
+
+#define RADIX_VMALLOC_START RADIX_KERN_VIRT_START
+#define RADIX_VMALLOC_SIZE RADIX_KERN_MAP_SIZE
+#define RADIX_VMALLOC_END (RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
+
+#define RADIX_KERN_IO_START RADIX_VMALLOC_END
+#define RADIX_KERN_IO_SIZE RADIX_KERN_MAP_SIZE
+#define RADIX_KERN_IO_END (RADIX_KERN_IO_START + RADIX_KERN_IO_SIZE)
+
+#define RADIX_VMEMMAP_START RADIX_KERN_IO_END
+#define RADIX_VMEMMAP_SIZE RADIX_KERN_MAP_SIZE
+#define RADIX_VMEMMAP_END (RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE)
+
+#ifndef __ASSEMBLY__
+#define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
+#define RADIX_PMD_TABLE_SIZE (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
+#define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
+#define RADIX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
+#endif
+
+extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address,
+ int psize);
+
+extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte);
+
+static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
+ unsigned long set)
+{
+ __be64 old_be, tmp_be;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # pte_update\n"
+ " andc %1,%0,%5 \n"
+ " or %1,%1,%4 \n"
+ " stdcx. %1,0,%3 \n"
+ " bne- 1b"
+ : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
+ : "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
+ : "cc" );
+
+ return be64_to_cpu(old_be);
+}
+
+static inline unsigned long radix__pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ unsigned long set,
+ int huge)
+{
+ unsigned long old_pte;
+
+ old_pte = __radix_pte_update(ptep, clr, set);
+ if (!huge)
+ assert_pte_locked(mm, addr);
+
+ return old_pte;
+}
+
+static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, int full)
+{
+ unsigned long old_pte;
+
+ if (full) {
+ old_pte = pte_val(*ptep);
+ *ptep = __pte(0);
+ } else
+ old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
+
+ return __pte(old_pte);
+}
+
+static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
+}
+
+static inline int radix__pte_none(pte_t pte)
+{
+ return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
+}
+
+static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+ *ptep = pte;
+
+ /*
+ * The architecture suggests a ptesync after setting the pte, which
+ * orders the store that updates the pte with subsequent page table
+ * walk accesses which may load the pte. Without this it may be
+ * possible for a subsequent access to result in a spurious fault.
+ *
+ * This is not necessary for correctness, because a spurious fault
+ * is tolerated by the page fault handler, and this store will
+ * eventually be seen. In testing, there was no noticeable increase
+ * in user faults on POWER9. Avoiding ptesync here is a significant
+ * win for things like fork. If a future microarchitecture benefits
+ * from ptesync, it should probably go into update_mmu_cache, rather
+ * than set_pte_at (which is used to set ptes unrelated to faults).
+ *
+ * Spurious faults from kernel memory are not tolerated, so there
+ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
+ * the pte update sequence from ISA Book III 6.10 Translation Table
+ * Update Synchronization Requirements.
+ */
+}
+
+static inline int radix__pmd_bad(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
+}
+
+static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
+}
+
+static inline int radix__pud_bad(pud_t pud)
+{
+ return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
+}
+
+
+static inline int radix__p4d_bad(p4d_t p4d)
+{
+ return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+static inline int radix__pmd_trans_huge(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+}
+
+static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | _PAGE_PTE);
+}
+
+extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, unsigned long clr,
+ unsigned long set);
+extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
+static inline int radix__has_transparent_hugepage(void)
+{
+ /* For radix, 2M at the PMD level means THP */
+ if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
+ return 1;
+ return 0;
+}
+#endif
+
+static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
+extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys);
+extern void radix__vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size);
+
+extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+ pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+ unsigned long rts_field;
+ /*
+ * We support 52 bits, hence:
+ * bits 52 - 31 = 21, 0b10101
+ * RTS encoding details
+ * bits 0 - 3 of rts -> bits 6 - 8 of the unsigned long
+ * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+ */
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
+ return rts_field;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int radix__create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+int radix__remove_section_mapping(unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
+#endif /* __ASSEMBLY__ */
+#endif
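
For readers less fluent in ppc64 asm: the ldarx/stdcx. loop in
__radix_pte_update() is an atomic read-modify-write on the raw big-endian PTE
word. A hypothetical C rendering with equivalent semantics (a sketch only; the
real code stays in asm for tight control of the instruction sequence):

	static unsigned long radix_pte_update_sketch(pte_t *ptep,
						     unsigned long clr,
						     unsigned long set)
	{
		__be64 *p = (__be64 *)ptep;
		__be64 old, new;

		do {
			old = READ_ONCE(*p);
			/* andc then or, exactly as the asm does */
			new = (old & ~cpu_to_be64(clr)) | cpu_to_be64(set);
		} while (cmpxchg(p, old, new) != old);

		return be64_to_cpu(old);
	}
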
diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
new file mode 100644
index 000000000..5fbe18544
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/slice.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
+#define _ASM_POWERPC_BOOK3S_64_SLICE_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif
+
+#define SLICE_LOW_SHIFT 28
+#define SLICE_LOW_TOP (0x100000000ul)
+#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
+
+#define SLICE_HIGH_SHIFT 40
+#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
+#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
+
+#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64
+
+struct mm_struct;
+
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ unsigned long flags, unsigned int psize,
+ int topdown);
+
+unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
+
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long len, unsigned int psize);
+
+void slice_init_new_context_exec(struct mm_struct *mm);
+void slice_setup_new_exec(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
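
Worked example of the slice arithmetic: with SLICE_LOW_SHIFT = 28 each low
slice covers 256MB, and SLICE_NUM_LOW = 0x100000000ul >> 28 = 16, so sixteen
slices cover the low 4GB; above that, SLICE_HIGH_SHIFT = 40 gives 1TB slices.
A hypothetical compile-time illustration:

	#include <linux/build_bug.h>

	static_assert(SLICE_NUM_LOW == 16);	/* 16 x 256MB covers 4GB */
	static_assert(GET_LOW_SLICE_INDEX(0x30000000ul) == 3); /* 768MB -> slice 3 */
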
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
new file mode 100644
index 000000000..751921f6d
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+ int active;
+ unsigned long index;
+ struct mm_struct *mm;
+ real_pte_t pte[PPC64_TLB_BATCH_NR];
+ unsigned long vpn[PPC64_TLB_BATCH_NR];
+ unsigned int psize;
+ int ssize;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch;
+
+ if (radix_enabled())
+ return;
+ /*
+ * apply_to_page_range() can call us with preemption enabled when
+ * operating on kernel page tables.
+ */
+ preempt_disable();
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
+ batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch;
+
+ if (radix_enabled())
+ return;
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
+
+ if (batch->index)
+ __flush_tlb_pending(batch);
+ batch->active = 0;
+ preempt_enable();
+}
+
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+extern void hash__tlbiel_all(unsigned int action);
+
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
+ int ssize, unsigned long flags);
+extern void flush_hash_range(unsigned long number, int local);
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+ pmd_t *pmdp, unsigned int psize, int ssize,
+ unsigned long flags);
+static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void hash__flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void hash__local_flush_all_mm(struct mm_struct *mm)
+{
+ /*
+ * There's no Page Walk Cache for hash, so what is needed is
+ * the same as flush_tlb_mm(), which doesn't really make sense
+ * with hash. So the only thing we could do is flush the
+ * entire LPID! Punt for now, as it's not being used.
+ */
+ WARN_ON_ONCE(1);
+}
+
+static inline void hash__flush_all_mm(struct mm_struct *mm)
+{
+ /*
+ * There's no Page Walk Cache for hash, so what is needed is
+ * the same as flush_tlb_mm(), which doesn't really make sense
+ * with hash. So the only thing we could do is flush the
+ * entire LPID! Punt for now, as it's not being used.
+ */
+ WARN_ON_ONCE(1);
+}
+
+static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void hash__flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+
+struct mmu_gather;
+extern void hash__tlb_flush(struct mmu_gather *tlb);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+/* Private function for use by PCI IO mapping code */
+extern void __flush_hash_table_range(unsigned long start, unsigned long end);
+void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
+#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
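
The lazy-MMU hooks above are driven by generic mm code; the effective pattern
is roughly the following (a hypothetical sketch, not literal core-mm source):

	static void update_range_sketch(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, int nr)
	{
		int i;

		arch_enter_lazy_mmu_mode();	/* hash: preempt off, batch active */
		for (i = 0; i < nr; i++)	/* updates queue into ppc64_tlb_batch */
			set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
		arch_leave_lazy_mmu_mode();	/* __flush_tlb_pending() if non-empty */
	}
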
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
new file mode 100644
index 000000000..77797a2a8
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TLBFLUSH_RADIX_H
+#define _ASM_POWERPC_TLBFLUSH_RADIX_H
+
+#include <asm/hvcall.h>
+
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+struct vm_area_struct;
+struct mm_struct;
+struct mmu_gather;
+
+static inline u64 psize_to_rpti_pgsize(unsigned long psize)
+{
+ if (psize == MMU_PAGE_4K)
+ return H_RPTI_PAGE_4K;
+ if (psize == MMU_PAGE_64K)
+ return H_RPTI_PAGE_64K;
+ if (psize == MMU_PAGE_2M)
+ return H_RPTI_PAGE_2M;
+ if (psize == MMU_PAGE_1G)
+ return H_RPTI_PAGE_1G;
+ return H_RPTI_PAGE_ALL;
+}
+
+static inline int mmu_get_ap(int psize)
+{
+ return mmu_psize_defs[psize].ap;
+}
+
+#ifdef CONFIG_PPC_RADIX_MMU
+extern void radix__tlbiel_all(unsigned int action);
+extern void radix__flush_tlb_lpid_page(unsigned int lpid,
+ unsigned long addr,
+ unsigned long page_size);
+extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__flush_all_lpid(unsigned int lpid);
+extern void radix__flush_all_lpid_guest(unsigned int lpid);
+#else
+static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }
+static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
+ unsigned long addr,
+ unsigned long page_size)
+{
+ WARN_ON(1);
+}
+static inline void radix__flush_pwc_lpid(unsigned int lpid)
+{
+ WARN_ON(1);
+}
+static inline void radix__flush_all_lpid(unsigned int lpid)
+{
+ WARN_ON(1);
+}
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
+{
+ WARN_ON(1);
+}
+#endif
+
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
+extern void radix__local_flush_all_mm(struct mm_struct *mm);
+extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize);
+extern void radix__tlb_flush(struct mmu_gather *tlb);
+#ifdef CONFIG_SMP
+extern void radix__flush_tlb_mm(struct mm_struct *mm);
+extern void radix__flush_all_mm(struct mm_struct *mm);
+extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize);
+#else
+#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
+#define radix__flush_all_mm(mm) radix__local_flush_all_mm(mm)
+#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
+#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
+#endif
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
+extern void radix__flush_tlb_all(void);
+
+#endif
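
psize_to_rpti_pgsize() translates the kernel's MMU_PAGE_* indices into the
H_RPTI_PAGE_* flag bits that the H_RPT_INVALIDATE hcall takes; the flags are
OR-able, so a caller selecting several page sizes might do (hypothetical):

	static u64 rpti_pg_sizes_sketch(void)
	{
		/* invalidate 64K and 2M translations in one call */
		return psize_to_rpti_pgsize(MMU_PAGE_64K) |
		       psize_to_rpti_pgsize(MMU_PAGE_2M);
	}
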
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
new file mode 100644
index 000000000..985bd8e69
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT ~0UL
+
+#include <linux/mm_types.h>
+#include <asm/book3s/64/tlbflush-hash.h>
+#include <asm/book3s/64/tlbflush-radix.h>
+
+/* TLB flush actions. Used as argument to tlbiel_all() */
+enum {
+ TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
+ TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
+};
+
+static inline void tlbiel_all(void)
+{
+ /*
+ * This is used for host machine check and bootup.
+ *
+ * This uses early_radix_enabled() (and implementations use
+ * early_cpu_has_feature() etc.) because that works early in
+ * boot, and this is the machine check path, which is not
+ * performance critical.
+ */
+ if (early_radix_enabled())
+ radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+ else
+ hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+}
+
+static inline void tlbiel_all_lpid(bool radix)
+{
+ /*
+ * This is used for guest machine check.
+ */
+ if (radix)
+ radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+ else
+ hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+}
+
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_pmd_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_hugetlb_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_tlb_kernel_range(start, end);
+ return hash__flush_tlb_kernel_range(start, end);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__local_flush_tlb_mm(mm);
+ return hash__local_flush_tlb_mm(mm);
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ if (radix_enabled())
+ return radix__local_flush_tlb_page(vma, vmaddr);
+ return hash__local_flush_tlb_page(vma, vmaddr);
+}
+
+static inline void local_flush_all_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__local_flush_all_mm(mm);
+ return hash__local_flush_all_mm(mm);
+}
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (radix_enabled())
+ return radix__tlb_flush(tlb);
+ return hash__tlb_flush(tlb);
+}
+
+#ifdef CONFIG_SMP
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__flush_tlb_mm(mm);
+ return hash__flush_tlb_mm(mm);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ if (radix_enabled())
+ return radix__flush_tlb_page(vma, vmaddr);
+ return hash__flush_tlb_page(vma, vmaddr);
+}
+
+static inline void flush_all_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__flush_all_mm(mm);
+ return hash__flush_all_mm(mm);
+}
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
+#define flush_all_mm(mm) local_flush_all_mm(mm)
+#endif /* CONFIG_SMP */
+
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ /*
+ * Book3S 64 does not require spurious fault flushes because the PTE
+ * must be re-fetched in case of an access permission problem. So the
+ * only reason for a spurious fault should be concurrent modification
+ * to the PTE, in which case the PTE will eventually be re-fetched by
+ * the MMU when it attempts the access again.
+ *
+ * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
+ * Entry, Setting a Reference or Change Bit or Upgrading Access
+ * Authority (PTE Subject to Atomic Hardware Updates):
+ *
+ * "If the only change being made to a valid PTE that is subject to
+ * atomic hardware updates is to set the Reference or Change bit to
+ * 1 or to upgrade access authority, a simpler sequence suffices
+ * because the translation hardware will refetch the PTE if an
+ * access is attempted for which the only problems were reference
+ * and/or change bits needing to be set or insufficient access
+ * authority."
+ *
+ * The nest MMU in POWER9 does not perform this PTE re-fetch, but
+ * it avoids the spurious fault problem by flushing the TLB before
+ * upgrading PTE permissions, see radix__ptep_set_access_flags.
+ */
+}
+
+static inline bool __pte_protnone(unsigned long pte)
+{
+ return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
+}
+
+static inline bool __pte_flags_need_flush(unsigned long oldval,
+ unsigned long newval)
+{
+ unsigned long delta = oldval ^ newval;
+
+ /*
+ * The return value of this function doesn't matter for hash:
+ * ptep_modify_prot_start() does a pte_update(), which does or
+ * schedules any necessary hash table update and flush.
+ */
+ if (!radix_enabled())
+ return true;
+
+ /*
+ * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+ */
+ VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
+
+ /*
+ * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
+ *
+ * In theory, some changed software bits could be tolerated, in
+ * practice those should rarely if ever matter.
+ */
+
+ if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
+ return true;
+
+ /*
+ * If any of the above was present in old but cleared in new, flush.
+ * With the exception of _PAGE_ACCESSED, don't worry about flushing
+ * if that was cleared (see the comment in ptep_clear_flush_young()).
+ */
+ if ((delta & ~_PAGE_ACCESSED) & oldval)
+ return true;
+
+ return false;
+}
+
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
+}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
+
+extern bool tlbie_capable;
+extern bool tlbie_enabled;
+
+static inline bool cputlb_use_tlbie(void)
+{
+ return tlbie_enabled;
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
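
To make __pte_flags_need_flush()'s radix rule concrete — flush when a
permission is lost, never when one is gained — consider this hypothetical pair
of PTEs (on hash the function returns true unconditionally):

	static void needs_flush_example(void)
	{
		pte_t ro = __pte(_PAGE_PTE | _PAGE_PRESENT | _PAGE_READ |
				 _PAGE_ACCESSED);
		pte_t rw = __pte(pte_val(ro) | _PAGE_WRITE);

		pte_needs_flush(ro, rw);	/* false: _PAGE_WRITE only gained */
		pte_needs_flush(rw, ro);	/* true: _PAGE_WRITE cleared in new */
	}
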
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
new file mode 100644
index 000000000..6b178ca14
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_PGALLOC_H
+
+#include <linux/mm.h>
+
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgalloc.h>
+#else
+#include <asm/book3s/32/pgalloc.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
new file mode 100644
index 000000000..d18b748ea
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_PGTABLE_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgtable.h>
+#else
+#include <asm/book3s/32/pgtable.h>
+#endif
+
+#ifndef __ASSEMBLY__
+/* Insert a PTE: the top-level function is out of line. It uses an
+ * inline low-level function from the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte);
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+ if (radix_enabled())
+ return;
+ __update_mmu_cache(vma, address, ptep);
+}
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 000000000..dec11de41
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h
new file mode 100644
index 000000000..1c121f3c5
--- /dev/null
+++ b/arch/powerpc/include/asm/bootx.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the structure passed from the BootX application
+ * (for MacOS) when it is used to boot Linux.
+ *
+ * Written by Benjamin Herrenschmidt.
+ */
+
+#ifndef __ASM_BOOTX_H__
+#define __ASM_BOOTX_H__
+
+#include <uapi/asm/bootx.h>
+
+/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index
+ * is represented by 3 short words, each containing a 16-bit (unsigned)
+ * color component. Later versions may contain the gamma table for
+ * direct-color devices here.
+ */
+#define BOOTX_COLORTABLE_SIZE (256UL*3UL*2UL)
+
+/* BootX passes the device-tree using a format that comes from earlier
+ * ppc32 kernels. This used to match what is in prom.h, but no longer
+ * does, so we now define it here.
+ */
+struct bootx_dt_prop {
+ u32 name;
+ int length;
+ u32 value;
+ u32 next;
+};
+
+struct bootx_dt_node {
+ u32 unused0;
+ u32 unused1;
+ u32 phandle; /* not really available */
+ u32 unused2;
+ u32 unused3;
+ u32 unused4;
+ u32 unused5;
+ u32 full_name;
+ u32 properties;
+ u32 parent;
+ u32 child;
+ u32 sibling;
+ u32 next;
+ u32 allnext;
+};
+
+extern void bootx_init(unsigned long r4, unsigned long phys);
+
+#endif
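
The u32 fields in these structures are byte offsets from the base of the tree
BootX hands over, not pointers; a hypothetical helper to reach a node's first
property would look like:

	/* 'base' is the start of the BootX device tree in memory. */
	static struct bootx_dt_prop *bootx_first_prop(void *base,
						      struct bootx_dt_node *np)
	{
		if (!np->properties)
			return NULL;
		return (struct bootx_dt_prop *)((char *)base + np->properties);
	}
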
diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h
new file mode 100644
index 000000000..e8a7b4ffb
--- /dev/null
+++ b/arch/powerpc/include/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
+#define _ASM_POWERPC_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
new file mode 100644
index 000000000..860f8868f
--- /dev/null
+++ b/arch/powerpc/include/asm/btext.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for using the procedures in btext.c.
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ */
+#ifndef __PPC_BTEXT_H
+#define __PPC_BTEXT_H
+#ifdef __KERNEL__
+
+extern int btext_find_display(int allow_nonstdout);
+extern void btext_update_display(unsigned long phys, int width, int height,
+ int depth, int pitch);
+extern void btext_setup_display(int width, int height, int depth, int pitch,
+ unsigned long address);
+#ifdef CONFIG_PPC32
+extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
+extern void btext_map(void);
+extern void btext_unmap(void);
+
+extern void btext_drawchar(char c);
+extern void btext_drawstring(const char *str);
+void __init btext_drawhex(unsigned long v);
+void __init btext_drawtext(const char *c, unsigned int len);
+
+void __init btext_clearscreen(void);
+void __init btext_flushscreen(void);
+void __init btext_flushline(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PPC_BTEXT_H */
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
new file mode 100644
index 000000000..61a473635
--- /dev/null
+++ b/arch/powerpc/include/asm/bug.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
+#ifdef __KERNEL__
+
+#include <asm/asm-compat.h>
+#include <asm/extable.h>
+
+#ifdef CONFIG_BUG
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+.macro __EMIT_BUG_ENTRY addr,file,line,flags
+ .section __bug_table,"aw"
+5001: .4byte \addr - .
+ .4byte 5002f - .
+ .short \line, \flags
+ .org 5001b+BUG_ENTRY_SIZE
+ .previous
+ .section .rodata,"a"
+5002: .asciz "\file"
+ .previous
+.endm
+#else
+.macro __EMIT_BUG_ENTRY addr,file,line,flags
+ .section __bug_table,"aw"
+5001: .4byte \addr - .
+ .short \flags
+ .org 5001b+BUG_ENTRY_SIZE
+ .previous
+.endm
+#endif /* verbose */
+
+.macro EMIT_WARN_ENTRY addr,file,line,flags
+ EX_TABLE(\addr,\addr+4)
+ __EMIT_BUG_ENTRY \addr,\file,\line,\flags
+.endm
+
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+ .if \flags & 1 /* BUGFLAG_WARNING */
+ .err /* Use EMIT_WARN_ENTRY for warnings */
+ .endif
+ __EMIT_BUG_ENTRY \addr,\file,\line,\flags
+.endm
+
+#else /* !__ASSEMBLY__ */
+/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
+ sizeof(struct bug_entry), respectively */
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _EMIT_BUG_ENTRY \
+ ".section __bug_table,\"aw\"\n" \
+ "2: .4byte 1b - .\n" \
+ " .4byte %0 - .\n" \
+ " .short %1, %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+#else
+#define _EMIT_BUG_ENTRY \
+ ".section __bug_table,\"aw\"\n" \
+ "2: .4byte 1b - .\n" \
+ " .short %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+#endif
+
+#define BUG_ENTRY(insn, flags, ...) \
+ __asm__ __volatile__( \
+ "1: " insn "\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry)), \
+ ##__VA_ARGS__)
+
+#define WARN_ENTRY(insn, flags, label, ...) \
+ asm_volatile_goto( \
+ "1: " insn "\n" \
+ EX_TABLE(1b, %l[label]) \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry)), \
+ ##__VA_ARGS__ : : label)
+
+/*
+ * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
+ * optimisations. However depending on the complexity of the condition
+ * some compiler versions may not produce optimal results.
+ */
+
+#define BUG() do { \
+ BUG_ENTRY("twi 31, 0, 0", 0); \
+ unreachable(); \
+} while (0)
+#define HAVE_ARCH_BUG
+
+#define __WARN_FLAGS(flags) do { \
+ __label__ __label_warn_on; \
+ \
+ WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \
+ unreachable(); \
+ \
+__label_warn_on: \
+ break; \
+} while (0)
+
+#ifdef CONFIG_PPC64
+#define BUG_ON(x) do { \
+ if (__builtin_constant_p(x)) { \
+ if (x) \
+ BUG(); \
+ } else { \
+ BUG_ENTRY(PPC_TLNEI " %4, 0", 0, "r" ((__force long)(x))); \
+ } \
+} while (0)
+
+#define WARN_ON(x) ({ \
+ bool __ret_warn_on = false; \
+ do { \
+ if (__builtin_constant_p((x))) { \
+ if (!(x)) \
+ break; \
+ __WARN(); \
+ __ret_warn_on = true; \
+ } else { \
+ __label__ __label_warn_on; \
+ \
+ WARN_ENTRY(PPC_TLNEI " %4, 0", \
+ BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
+ __label_warn_on, \
+ "r" ((__force long)(x))); \
+ break; \
+__label_warn_on: \
+ __ret_warn_on = true; \
+ } \
+ } while (0); \
+ unlikely(__ret_warn_on); \
+})
+
+#define HAVE_ARCH_BUG_ON
+#define HAVE_ARCH_WARN_ON
+#endif
+
+#endif /* __ASSEMBLY__ */
+#else
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+.macro EMIT_WARN_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#define _EMIT_WARN_ENTRY
+#endif
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+void hash__do_page_fault(struct pt_regs *);
+void bad_page_fault(struct pt_regs *, int);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void _exception_pkey(struct pt_regs *, unsigned long, int);
+extern void die(const char *, struct pt_regs *, long);
+void die_mce(const char *str, struct pt_regs *regs, long err);
+extern bool die_will_crash(void);
+extern void panic_flush_kmsg_start(void);
+extern void panic_flush_kmsg_end(void);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BUG_H */
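
On ppc64 the non-constant BUG_ON()/WARN_ON() paths above compile to a single
conditional trap instruction (tlnei rN,0 traps when the register is non-zero)
plus an out-of-line __bug_table entry, rather than a compare-and-branch. A
hypothetical use:

	static void check_ptr(void *p)
	{
		BUG_ON(!p);			/* one "tlnei" on (long)!p */
		WARN_ON((unsigned long)p & 7);	/* traps, warns, then continues */
	}
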
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
new file mode 100644
index 000000000..ae0a68a83
--- /dev/null
+++ b/arch/powerpc/include/asm/cache.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_PPC_8xx)
+#define L1_CACHE_SHIFT 4
+#define MAX_COPY_PREFETCH 1
+#define IFETCH_ALIGN_SHIFT 2
+#elif defined(CONFIG_PPC_E500MC)
+#define L1_CACHE_SHIFT 6
+#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3
+#elif defined(CONFIG_PPC32)
+#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT 7
+#else
+#define L1_CACHE_SHIFT 5
+#endif
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT 7
+#define IFETCH_ALIGN_SHIFT 4 /* POWER8,9 */
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT)
+
+#if !defined(__ASSEMBLY__)
+#ifdef CONFIG_PPC64
+
+struct ppc_cache_info {
+ u32 size;
+ u32 line_size;
+ u32 block_size; /* L1 only */
+ u32 log_block_size;
+ u32 blocks_per_page;
+ u32 sets;
+ u32 assoc;
+};
+
+struct ppc64_caches {
+ struct ppc_cache_info l1d;
+ struct ppc_cache_info l1i;
+ struct ppc_cache_info l2;
+ struct ppc_cache_info l3;
+};
+
+extern struct ppc64_caches ppc64_caches;
+
+static inline u32 l1_dcache_shift(void)
+{
+ return ppc64_caches.l1d.log_block_size;
+}
+
+static inline u32 l1_dcache_bytes(void)
+{
+ return ppc64_caches.l1d.block_size;
+}
+
+static inline u32 l1_icache_shift(void)
+{
+ return ppc64_caches.l1i.log_block_size;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return ppc64_caches.l1i.block_size;
+}
+#else
+static inline u32 l1_dcache_shift(void)
+{
+ return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_dcache_bytes(void)
+{
+ return L1_CACHE_BYTES;
+}
+
+static inline u32 l1_icache_shift(void)
+{
+ return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return L1_CACHE_BYTES;
+}
+
+#endif
+
+#define __read_mostly __section(".data..read_mostly")
+
+#ifdef CONFIG_PPC_BOOK3S_32
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR() 0L
+#define _get_L3CR() 0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
+#endif
+
+static inline void dcbz(void *addr)
+{
+ __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbi(void *addr)
+{
+ __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbf(void *addr)
+{
+ __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbst(void *addr)
+{
+ __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void icbi(void *addr)
+{
+ asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void iccci(void *addr)
+{
+ asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
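
dcbz() zeroes an entire cache block, which is why callers need the runtime
block size from l1_dcache_bytes(). A hypothetical block-at-a-time zeroing
loop (the buffer must be block-aligned and a multiple of the block size):

	static void zero_blocks(void *p, unsigned long bytes)
	{
		u32 bsize = l1_dcache_bytes();
		unsigned long i;

		for (i = 0; i < bytes; i += bsize)
			dcbz((char *)p + i);
	}
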
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
new file mode 100644
index 000000000..7564dd4fd
--- /dev/null
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Book3s has no ptesync after setting a pte, so without this ptesync it's
+ * possible for a kernel virtual mapping access to take a spurious fault
+ * if it's accessed right after the pte is set. The page fault handler does
+ * not expect this type of fault. flush_cache_vmap is not exactly the right
+ * place to put this, but it seems to work well enough.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ asm volatile("ptesync" ::: "memory");
+}
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean. We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+static inline void flush_dcache_page(struct page *page)
+{
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ return;
+ /* avoid an atomic op if possible */
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+void flush_icache_range(unsigned long start, unsigned long stop);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
+#define flush_icache_user_page flush_icache_user_page
+
+void flush_dcache_icache_page(struct page *page);
+
+/**
+ * flush_dcache_range(): Write any modified data cache blocks out to memory and
+ * invalidate them. Does not invalidate the corresponding instruction cache
+ * blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ mb(); /* sync */
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ dcbf(addr);
+ mb(); /* sync */
+}
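+
+/*
+ * As a worked example, assuming 128-byte cache blocks,
+ * flush_dcache_range(0x1005, 0x1100) rounds the start down to 0x1000 and
+ * issues dcbf on the two blocks at 0x1000 and 0x1080, followed by a sync.
+ */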
+
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ */
+static inline void clean_dcache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ dcbst(addr);
+ mb(); /* sync */
+}
+
+/*
+ * Like above, but invalidate the D-cache. This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ */
+static inline void invalidate_dcache_range(unsigned long start,
+ unsigned long stop)
+{
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ dcbi(addr);
+ mb(); /* sync */
+}
+
+#ifdef CONFIG_4xx
+static inline void flush_instruction_cache(void)
+{
+ iccci((void *)KERNELBASE);
+ isync();
+}
+#else
+void flush_instruction_cache(void);
+#endif
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
new file mode 100644
index 000000000..6a79b5d1c
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author:
+ * David Erb (djerb@us.ibm.com)
+ * Kevin Corry (kevcorry@us.ibm.com)
+ */
+
+#ifndef __ASM_CELL_PMU_H__
+#define __ASM_CELL_PMU_H__
+
+/* The Cell PMU has four hardware performance counters, which can be
+ * configured as four 32-bit counters or eight 16-bit counters.
+ */
+#define NR_PHYS_CTRS 4
+#define NR_CTRS (NR_PHYS_CTRS * 2)
+
+/* Macros for the pm_control register. */
+#define CBE_PM_16BIT_CTR(ctr) (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
+#define CBE_PM_ENABLE_PERF_MON 0x80000000
+#define CBE_PM_STOP_AT_MAX 0x40000000
+#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
+#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17)
+#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
+#define CBE_PM_FREEZE_ALL_CTRS 0x00100000
+#define CBE_PM_ENABLE_EXT_TRACE 0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)
+
+/* Macros for the trace_address register. */
+#define CBE_PM_TRACE_BUF_FULL 0x00000800
+#define CBE_PM_TRACE_BUF_EMPTY 0x00000400
+#define CBE_PM_TRACE_BUF_DATA_COUNT(ta) ((ta) & 0x3ff)
+#define CBE_PM_TRACE_BUF_MAX_COUNT 0x400
+
+/* Macros for the pm07_control registers. */
+#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
+#define CBE_PM_CTR_INPUT_CONTROL 0x02000000
+#define CBE_PM_CTR_POLARITY 0x01000000
+#define CBE_PM_CTR_COUNT_CYCLES 0x00800000
+#define CBE_PM_CTR_ENABLE 0x00400000
+#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x) (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x) (((x) & 1) << 22)
+
+/* Macros for the pm_status register. */
+#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7)))
+
+enum pm_reg_name {
+ group_control,
+ debug_bus_control,
+ trace_address,
+ ext_tr_timer,
+ pm_status,
+ pm_control,
+ pm_interval,
+ pm_start_stop,
+};
+
+/* Routines for reading/writing the PMU registers. */
+extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
+extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+extern u32 cbe_read_ctr(u32 cpu, u32 ctr);
+extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr);
+extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg);
+extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
+extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+extern void cbe_enable_pm(u32 cpu);
+extern void cbe_disable_pm(u32 cpu);
+
+extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
+
+extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+extern void cbe_disable_pm_interrupts(u32 cpu);
+extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
+extern void cbe_sync_irq(int node);
+
+#define CBE_COUNT_SUPERVISOR_MODE 0
+#define CBE_COUNT_HYPERVISOR_MODE 1
+#define CBE_COUNT_PROBLEM_MODE 2
+#define CBE_COUNT_ALL_MODES 3
+
+#endif /* __ASM_CELL_PMU_H__ */
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
new file mode 100644
index 000000000..e1c431ef3
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cbe_regs.h
+ *
+ * This file is intended to hold the various register definitions for CBE
+ * on-chip system devices (memory controller, IO controller, etc...)
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * David J. Erb (djerb@us.ibm.com)
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#ifndef CBE_REGS_H
+#define CBE_REGS_H
+
+#include <asm/cell-pmu.h>
+
+/*
+ *
+ * Some HID register definitions
+ *
+ */
+
+/* CBE specific HID0 bits */
+#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul
+#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul
+#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
+#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
+
+#define MAX_CBE 2
+
+/*
+ *
+ * Pervasive unit register definitions
+ *
+ */
+
+union spe_reg {
+ u64 val;
+ u8 spe[8];
+};
+
+union ppe_spe_reg {
+ u64 val;
+ struct {
+ u32 ppe;
+ u32 spe;
+ };
+};
+
+
+struct cbe_pmd_regs {
+ /* Debug Bus Control */
+ u64 pad_0x0000; /* 0x0000 */
+
+ u64 group_control; /* 0x0008 */
+
+ u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
+
+ u64 debug_bus_control; /* 0x00a8 */
+
+ u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
+
+ u64 trace_aux_data; /* 0x0100 */
+ u64 trace_buffer_0_63; /* 0x0108 */
+ u64 trace_buffer_64_127; /* 0x0110 */
+ u64 trace_address; /* 0x0118 */
+ u64 ext_tr_timer; /* 0x0120 */
+
+ u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
+
+ /* Performance Monitor */
+ u64 pm_status; /* 0x0400 */
+ u64 pm_control; /* 0x0408 */
+ u64 pm_interval; /* 0x0410 */
+ u64 pm_ctr[4]; /* 0x0418 */
+ u64 pm_start_stop; /* 0x0438 */
+ u64 pm07_control[8]; /* 0x0440 */
+
+ u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
+
+ /* Thermal Sensor Registers */
+ union spe_reg ts_ctsr1; /* 0x0800 */
+ u64 ts_ctsr2; /* 0x0808 */
+ union spe_reg ts_mtsr1; /* 0x0810 */
+ u64 ts_mtsr2; /* 0x0818 */
+ union spe_reg ts_itr1; /* 0x0820 */
+ u64 ts_itr2; /* 0x0828 */
+ u64 ts_gitr; /* 0x0830 */
+ u64 ts_isr; /* 0x0838 */
+ u64 ts_imr; /* 0x0840 */
+ union spe_reg tm_cr1; /* 0x0848 */
+ u64 tm_cr2; /* 0x0850 */
+ u64 tm_simr; /* 0x0858 */
+ union ppe_spe_reg tm_tpr; /* 0x0860 */
+ union spe_reg tm_str1; /* 0x0868 */
+ u64 tm_str2; /* 0x0870 */
+ union ppe_spe_reg tm_tsr; /* 0x0878 */
+
+ /* Power Management */
+ u64 pmcr; /* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
+ u64 pmsr; /* 0x0888 */
+
+ /* Time Base Register */
+ u64 tbr; /* 0x0890 */
+
+ u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
+
+ /* Fault Isolation Registers */
+ u64 checkstop_fir; /* 0x0c00 */
+ u64 recoverable_fir; /* 0x0c08 */
+ u64 spec_att_mchk_fir; /* 0x0c10 */
+ u32 fir_mode_reg; /* 0x0c18 */
+ u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */
+#define CBE_PMD_FIR_MODE_M8 0x00800
+ u64 fir_enable_mask; /* 0x0c20 */
+
+ u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */
+ u64 ras_esc_0; /* 0x0ca8 */
+ u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */
+};
+
+extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
+extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
+
+/*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+ u32 group_control;
+ u32 debug_bus_control;
+ u32 trace_address;
+ u32 ext_tr_timer;
+ u32 pm_status;
+ u32 pm_control;
+ u32 pm_interval;
+ u32 pm_start_stop;
+ u32 pm07_control[NR_CTRS];
+
+ u32 pm_ctr[NR_PHYS_CTRS];
+ u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
+ *
+ * IIC unit register definitions
+ *
+ */
+
+struct cbe_iic_pending_bits {
+ u32 data;
+ u8 flags;
+ u8 class;
+ u8 source;
+ u8 prio;
+};
+
+#define CBE_IIC_IRQ_VALID 0x80
+#define CBE_IIC_IRQ_IPI 0x40
+
+struct cbe_iic_thread_regs {
+ struct cbe_iic_pending_bits pending;
+ struct cbe_iic_pending_bits pending_destr;
+ u64 generate;
+ u64 prio;
+};
+
+struct cbe_iic_regs {
+ u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */
+
+ /* IIC interrupt registers */
+ struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
+
+ u64 iic_ir; /* 0x0440 */
+#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0 0x0
+#define CBE_IIC_IR_IOC_1S 0xb
+#define CBE_IIC_IR_PT_0 0xe
+#define CBE_IIC_IR_PT_1 0xf
+
+ u64 iic_is; /* 0x0448 */
+#define CBE_IIC_IS_PMI 0x2
+
+ u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
+
+ /* IOC FIR */
+ u64 ioc_fir_reset; /* 0x0500 */
+ u64 ioc_fir_set; /* 0x0508 */
+ u64 ioc_checkstop_enable; /* 0x0510 */
+ u64 ioc_fir_error_mask; /* 0x0518 */
+ u64 ioc_syserr_enable; /* 0x0520 */
+ u64 ioc_fir; /* 0x0528 */
+
+ u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
+};
+
+extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
+extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+
+
+struct cbe_mic_tm_regs {
+ u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
+
+ u64 mic_ctl_cnfg2; /* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
+
+ u64 pad_0x0048; /* 0x0048 */
+
+ u64 mic_aux_trc_base; /* 0x0050 */
+ u64 mic_aux_trc_max_addr; /* 0x0058 */
+ u64 mic_aux_trc_cur_addr; /* 0x0060 */
+ u64 mic_aux_trc_grf_addr; /* 0x0068 */
+ u64 mic_aux_trc_grf_data; /* 0x0070 */
+
+ u64 pad_0x0078; /* 0x0078 */
+
+ u64 mic_ctl_cnfg_0; /* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
+
+ u64 pad_0x0088; /* 0x0088 */
+
+ u64 slow_fast_timer_0; /* 0x0090 */
+ u64 slow_next_timer_0; /* 0x0098 */
+
+ u8 pad_0x00a0_0x00f8[0x00f8 - 0x00a0]; /* 0x00a0 */
+ u64 mic_df_ecc_address_0; /* 0x00f8 */
+
+ u8 pad_0x0100_0x01b8[0x01b8 - 0x0100]; /* 0x0100 */
+ u64 mic_df_ecc_address_1; /* 0x01b8 */
+
+ u64 mic_ctl_cnfg_1; /* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
+
+ u64 pad_0x01c8; /* 0x01c8 */
+
+ u64 slow_fast_timer_1; /* 0x01d0 */
+ u64 slow_next_timer_1; /* 0x01d8 */
+
+ u8 pad_0x01e0_0x0208[0x0208 - 0x01e0]; /* 0x01e0 */
+ u64 mic_exc; /* 0x0208 */
+#define CBE_MIC_EXC_BLOCK_SCRUB 0x0800000000000000ULL
+#define CBE_MIC_EXC_FAST_SCRUB 0x0100000000000000ULL
+
+ u64 mic_mnt_cfg; /* 0x0210 */
+#define CBE_MIC_MNT_CFG_CHAN_0_POP 0x0002000000000000ULL
+#define CBE_MIC_MNT_CFG_CHAN_1_POP 0x0004000000000000ULL
+
+ u64 mic_df_config; /* 0x0218 */
+#define CBE_MIC_ECC_DISABLE_0 0x4000000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_0 0x2000000000000000ULL
+#define CBE_MIC_ECC_DISABLE_1 0x0080000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_1 0x0040000000000000ULL
+
+ u8 pad_0x0220_0x0230[0x0230 - 0x0220]; /* 0x0220 */
+ u64 mic_fir; /* 0x0230 */
+#define CBE_MIC_FIR_ECC_SINGLE_0_ERR 0x0200000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_ERR 0x0100000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_ERR 0x0080000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_ERR 0x0040000000000000ULL
+#define CBE_MIC_FIR_ECC_ERR_MASK 0xffff000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_CTE 0x0000020000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_CTE 0x0000010000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_CTE 0x0000008000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_CTE 0x0000004000000000ULL
+#define CBE_MIC_FIR_ECC_CTE_MASK 0x0000ffff00000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_RESET 0x0000000002000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_RESET 0x0000000001000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_RESET 0x0000000000800000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_RESET 0x0000000000400000ULL
+#define CBE_MIC_FIR_ECC_RESET_MASK 0x00000000ffff0000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_SET 0x0000000000000200ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_SET 0x0000000000000100ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_SET 0x0000000000000080ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_SET 0x0000000000000040ULL
+#define CBE_MIC_FIR_ECC_SET_MASK 0x000000000000ffffULL
+ u64 mic_fir_debug; /* 0x0238 */
+
+ u8 pad_0x0240_0x1000[0x1000 - 0x0240]; /* 0x0240 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
+
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
+
+/* some utility functions to deal with SMT */
+extern u32 cbe_get_hw_thread_id(int cpu);
+extern u32 cbe_cpu_to_node(int cpu);
+extern u32 cbe_node_to_cpu(int node);
+
+/* Init this module early */
+extern void cbe_regs_init(void);
+
+
+#endif /* CBE_REGS_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
new file mode 100644
index 000000000..4b573a3b7
--- /dev/null
+++ b/arch/powerpc/include/asm/checksum.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_CHECKSUM_H
+#define _ASM_POWERPC_CHECKSUM_H
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+#include <linux/in6.h>
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it returns 0 so the
+ * caller knows the copy did not complete.
+ *
+ * Like csum_partial, this must be called with even lengths,
+ * except for the last fragment.
+ */
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len);
+#define HAVE_CSUM_COPY_USER
+extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+ int len);
+
+#define _HAVE_ARCH_CSUM_AND_COPY
+#define csum_partial_copy_nocheck(src, dst, len) \
+ csum_partial_copy_generic((src), (dst), (len))
+
+
+/*
+ * turns a 32-bit partial checksum (e.g. from csum_partial) into a
+ * 1's complement 16-bit checksum.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ u32 tmp = (__force u32)sum;
+
+ /*
+	 * Swap the two 16-bit halves of sum and add them together:
+	 * any carry from adding the two halves propagates from the
+	 * lower half into the upper half, giving us the correct
+	 * folded sum in the upper half.
+ */
+ return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
+}
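+
+/*
+ * For example, csum_fold(0x12345678) computes 0x12345678 + 0x56781234 =
+ * 0x68ac68ac, and (~0x68ac68ac) >> 16 = 0x9753, the same value as adding
+ * 0x1234 + 0x5678 = 0x68ac and complementing it.
+ */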
+
+static inline u32 from64to32(u64 x)
+{
+ return (x + ror64(x, 32)) >> 32;
+}
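+
+/*
+ * For example, from64to32(0xffffffff00000001ull) is 1: the rotate adds
+ * the two 32-bit halves (0xffffffff + 0x00000001 = 0x100000000), and the
+ * carry out of the low word lands in the high word, so the end-around
+ * carry is folded in for free.
+ */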
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+#ifdef __powerpc64__
+ u64 s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN__
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum) from64to32(s);
+#else
+ __asm__("\n\
+ addc %0,%0,%1 \n\
+ adde %0,%0,%2 \n\
+ adde %0,%0,%3 \n\
+ addze %0,%0 \n\
+ "
+ : "=r" (sum)
+ : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
+ return sum;
+#endif
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+ u64 res = (__force u64)csum;
+
+ res += (__force u64)addend;
+ return (__force __wsum)((u32)res + (res >> 32));
+#else
+ if (__builtin_constant_p(csum) && csum == 0)
+ return addend;
+ if (__builtin_constant_p(addend) && addend == 0)
+ return csum;
+
+ asm("addc %0,%0,%1;"
+ "addze %0,%0;"
+ : "+r" (csum) : "r" (addend) : "xer");
+ return csum;
+#endif
+}
+
+#define HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+ /* rotate sum to align it with a 16b boundary */
+ return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
+}
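+
+/*
+ * For example, csum_shift(0x000000ff, 1) yields 0x0000ff00: a checksum
+ * accumulated from an odd offset has its bytes swapped relative to a
+ * 16-bit aligned sum, and rotating by 8 bits realigns it.
+ */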
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which are always checksummed on 4-octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
+{
+ const u32 *ptr = (const u32 *)iph + 1;
+#ifdef __powerpc64__
+ unsigned int i;
+ u64 s = *(const u32 *)iph;
+
+ for (i = 0; i < ihl - 1; i++, ptr++)
+ s += *ptr;
+ return (__force __wsum)from64to32(s);
+#else
+ __wsum sum, tmp;
+
+ asm("mtctr %3;"
+ "addc %0,%4,%5;"
+ "1: lwzu %1, 4(%2);"
+ "adde %0,%0,%1;"
+ "bdnz 1b;"
+ "addze %0,%0;"
+ : "=r" (sum), "=r" (tmp), "+b" (ptr)
+ : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
+ : "ctr", "xer", "memory");
+
+ return sum;
+#endif
+}
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return csum_fold(ip_fast_csum_nofold(iph, ihl));
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum __csum_partial(const void *buff, int len, __wsum sum);
+
+static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+ if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
+ if (len == 2)
+ sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
+ if (len >= 4)
+ sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
+ if (len == 6)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 4));
+ if (len >= 8)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 4));
+ if (len == 10)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 8));
+ if (len >= 12)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 8));
+ if (len == 14)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 12));
+ if (len >= 16)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 12));
+ } else if (__builtin_constant_p(len) && (len & 3) == 0) {
+ sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
+ } else {
+ sum = __csum_partial(buff, len, sum);
+ }
+ return sum;
+}
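+
+/*
+ * For a compile-time constant length the out-of-line call is avoided
+ * entirely; e.g. csum_partial(buff, 8, sum) reduces to two csum_add()
+ * calls on the 32-bit words at buff and buff + 4.
+ */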
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/clocksource.h b/arch/powerpc/include/asm/clocksource.h
new file mode 100644
index 000000000..0a26ef13a
--- /dev/null
+++ b/arch/powerpc/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CLOCKSOURCE_H
+#define _ASM_POWERPC_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* _ASM_POWERPC_CLOCKSOURCE_H */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
new file mode 100644
index 000000000..05f246c0e
--- /dev/null
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CMPXCHG_H_
+#define _ASM_POWERPC_CMPXCHG_H_
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/synch.h>
+#include <linux/bug.h>
+
+#ifdef __BIG_ENDIAN
+#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
+#else
+#define BITOFF_CAL(size, off) (off * BITS_PER_BYTE)
+#endif
+
+#define XCHG_GEN(type, sfx, cl) \
+static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
+{ \
+ unsigned int prev, prev_mask, tmp, bitoff, off; \
+ \
+ off = (unsigned long)p % sizeof(u32); \
+ bitoff = BITOFF_CAL(sizeof(type), off); \
+ p -= off; \
+ val <<= bitoff; \
+ prev_mask = (u32)(type)-1 << bitoff; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%3\n" \
+" andc %1,%0,%5\n" \
+" or %1,%1,%4\n" \
+" stwcx. %1,0,%3\n" \
+" bne- 1b\n" \
+ : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
+ : "r" (p), "r" (val), "r" (prev_mask) \
+ : "cc", cl); \
+ \
+ return prev >> bitoff; \
+}
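+
+/*
+ * For example, on a big-endian kernel, __xchg_u8_local() on the byte at
+ * word offset 1 computes bitoff = (4 - 1 - 1) * 8 = 16 and prev_mask =
+ * 0x00ff0000, so the lwarx/stwcx. loop rewrites only that byte of the
+ * containing word and returns the old byte as prev >> 16.
+ */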
+
+#define CMPXCHG_GEN(type, sfx, br, br2, cl) \
+static inline \
+u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
+{ \
+ unsigned int prev, prev_mask, tmp, bitoff, off; \
+ \
+ off = (unsigned long)p % sizeof(u32); \
+ bitoff = BITOFF_CAL(sizeof(type), off); \
+ p -= off; \
+ old <<= bitoff; \
+ new <<= bitoff; \
+ prev_mask = (u32)(type)-1 << bitoff; \
+ \
+ __asm__ __volatile__( \
+ br \
+"1: lwarx %0,0,%3\n" \
+" and %1,%0,%6\n" \
+" cmpw 0,%1,%4\n" \
+" bne- 2f\n" \
+" andc %1,%0,%6\n" \
+" or %1,%1,%5\n" \
+" stwcx. %1,0,%3\n" \
+" bne- 1b\n" \
+ br2 \
+ "\n" \
+"2:" \
+ : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
+ : "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
+ : "cc", cl); \
+ \
+ return prev >> bitoff; \
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*p' to be val and returns
+ * the previous value stored there.
+ */
+
+XCHG_GEN(u8, _local, "memory");
+XCHG_GEN(u8, _relaxed, "cc");
+XCHG_GEN(u16, _local, "memory");
+XCHG_GEN(u16, _relaxed, "cc");
+
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u32_relaxed(u32 *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2\n"
+" stwcx. %3,0,%2\n"
+" bne- 1b"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (val)
+ : "cc");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_relaxed(u64 *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2\n"
+" stdcx. %3,0,%2\n"
+" bne- 1b"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (val)
+ : "cc");
+
+ return prev;
+}
+#endif
+
+static __always_inline unsigned long
+__xchg_local(void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __xchg_u8_local(ptr, x);
+ case 2:
+ return __xchg_u16_local(ptr, x);
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
+ return x;
+}
+
+static __always_inline unsigned long
+__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __xchg_u8_relaxed(ptr, x);
+ case 2:
+ return __xchg_u16_relaxed(ptr, x);
+ case 4:
+ return __xchg_u32_relaxed(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_relaxed(ptr, x);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
+ return x;
+}
+#define arch_xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define arch_xchg_relaxed(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+})
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+
+CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u8, _local, , , "memory");
+CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u8, _relaxed, , , "cc");
+CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u16, _local, , , "memory");
+CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u16, _relaxed, , , "cc");
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
+" cmpw 0,%0,%3\n"
+" bne- 2f\n"
+" stwcx. %4,0,%2\n"
+" bne- 1b\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc");
+
+ return prev;
+}
+
+/*
+ * The cmpxchg family doesn't provide any ordering guarantee if the compare
+ * part fails, so we can avoid superfluous barriers by implementing
+ * cmpxchg() and cmpxchg_acquire() in assembly. We don't do the same for
+ * cmpxchg_release(), because that would put a barrier in the middle of a
+ * ll/sc loop, which is probably a bad idea: for example, it might make
+ * the conditional store more likely to fail.
+ */
+static __always_inline unsigned long
+__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
+" cmpw 0,%0,%3\n"
+" bne- 2f\n"
+" stwcx. %4,0,%2\n"
+" bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+ "\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64_relaxed\n"
+" cmpd 0,%0,%3\n"
+" bne- 2f\n"
+" stdcx. %4,0,%2\n"
+" bne- 1b\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64_acquire\n"
+" cmpd 0,%0,%3\n"
+" bne- 2f\n"
+" stdcx. %4,0,%2\n"
+" bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+ "\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16(ptr, old, new);
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __cmpxchg_u8_local(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_local(ptr, old, new);
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __cmpxchg_u8_relaxed(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_relaxed(ptr, old, new);
+ case 4:
+ return __cmpxchg_u32_relaxed(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_relaxed(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 1:
+ return __cmpxchg_u8_acquire(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_acquire(ptr, old, new);
+ case 4:
+ return __cmpxchg_u32_acquire(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_acquire(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
+ return old;
+}
+#define arch_cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+
+#define arch_cmpxchg_local(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg_acquire(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
+#ifdef CONFIG_PPC64
+#define arch_cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg((ptr), (o), (n)); \
+ })
+#define arch_cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
+ })
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
+})
+#define arch_cmpxchg64_acquire(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
+})
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CMPXCHG_H_ */
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000..ed7b14484
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+ .pushsection ".rodata"
+ .balign 4
+ .global \name
+\name:
+ .4byte \label - .
+ .popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
new file mode 100644
index 000000000..1c6316ec4
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ */
+
+#include <asm/types.h>
+#include <asm/ppc-opcode.h>
+#include <linux/string.h>
+#include <linux/kallsyms.h>
+#include <asm/asm-compat.h>
+#include <asm/inst.h>
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+DECLARE_STATIC_KEY_FALSE(init_mem_is_free);
+
+/*
+ * A PowerPC branch instruction is:
+ *
+ * 0 6 30 31
+ * +---------+----------------+---+---+
+ * | opcode | LI |AA |LK |
+ * +---------+----------------+---+---+
+ * Where AA = 0 and LK = 0
+ *
+ * LI is a signed 24-bit integer. The real branch offset is computed
+ * by: imm32 = SignExtend(LI:'0b00', 32);
+ *
+ * So the maximum forward branch should be:
+ * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
+ * The maximum backward branch should be:
+ * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ */
+static inline bool is_offset_in_branch_range(long offset)
+{
+ return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+}
+
+static inline bool is_offset_in_cond_branch_range(long offset)
+{
+ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
+static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags)
+{
+ long offset;
+
+ *instr = ppc_inst(0);
+ offset = target;
+	if (!(flags & BRANCH_ABSOLUTE))
+ offset = offset - (unsigned long)addr;
+
+ /* Check we can represent the target in the instruction format */
+ if (!is_offset_in_branch_range(offset))
+ return 1;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
+
+ return 0;
+}
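+
+/*
+ * For example, a plain relative branch 0x100 bytes forward (flags == 0,
+ * target == (unsigned long)addr + 0x100) encodes as
+ * 0x48000000 | 0x100 = 0x48000100, i.e. "b +0x100".
+ */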
+
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, ppc_inst_t instr);
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
+
+static inline unsigned long patch_site_addr(s32 *site)
+{
+ return (unsigned long)site + *site;
+}
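+
+/*
+ * The patch_site asm macro in <asm/code-patching-asm.h> records
+ * "label - ." at the site, so adding *site back to the site's own
+ * address recovers the absolute address of the instruction to patch.
+ */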
+
+static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
+{
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
+}
+
+static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
+}
+
+static inline int modify_instruction(unsigned int *addr, unsigned int clr,
+ unsigned int set)
+{
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
+}
+
+static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
+{
+ return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
+}
+
+static inline unsigned int branch_opcode(ppc_inst_t instr)
+{
+ return ppc_inst_primary_opcode(instr) & 0x3F;
+}
+
+static inline int instr_is_branch_iform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 18;
+}
+
+static inline int instr_is_branch_bform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(ppc_inst_t instr);
+int instr_is_relative_link_branch(ppc_inst_t instr);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
+bool is_conditional_branch(ppc_inst_t instr);
+
+#define OP_RT_RA_MASK 0xffff0000UL
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
+
+
+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+ u32 *insn = func;
+
+ /*
+ * A PPC64 ABIv2 function may have a local and a global entry
+ * point. We need to use the local entry point when patching
+ * functions, so identify and step over the global entry point
+ * sequence.
+ *
+ * The global entry point sequence is always of the form:
+ *
+ * addis r2,r12,XXXX
+ * addi r2,r2,XXXX
+ *
+ * A linker optimisation may convert the addis to lis:
+ *
+ * lis r2,XXXX
+ * addi r2,r2,XXXX
+ */
+ if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+ ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+ ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+ return (unsigned long)(insn + 2);
+ else
+ return (unsigned long)func;
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
+ /*
+ * On PPC64 ABIv1 the function pointer actually points to the
+ * function's descriptor. The first entry in the descriptor is the
+ * address of the function text.
+ */
+ return ((struct func_desc *)func)->addr;
+#else
+ return (unsigned long)func;
+#endif
+}
+
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+	/* On PPC64 ABIv2 the global entry point is at the function's address */
+ return (unsigned long)func;
+#else
+	/* In all other cases there is no difference from ppc_function_entry() */
+ return ppc_function_entry(func);
+#endif
+}
+
+/*
+ * Wrapper around kallsyms_lookup_name() to return the function entry address:
+ * - For ABIv1, we lookup the dot variant.
+ * - For ABIv2, we return the local entry point.
+ */
+static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
+{
+ unsigned long addr;
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+ /* check for dot variant */
+ char dot_name[1 + KSYM_NAME_LEN];
+ bool dot_appended = false;
+
+ if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
+ return 0;
+
+ if (name[0] != '.') {
+ dot_name[0] = '.';
+ dot_name[1] = '\0';
+ strlcat(dot_name, name, sizeof(dot_name));
+ dot_appended = true;
+ } else {
+ dot_name[0] = '\0';
+ strlcat(dot_name, name, sizeof(dot_name));
+ }
+ addr = kallsyms_lookup_name(dot_name);
+ if (!addr && dot_appended)
+ /* Let's try the original non-dot symbol lookup */
+ addr = kallsyms_lookup_name(name);
+#elif defined(CONFIG_PPC64_ELF_ABI_V2)
+ addr = kallsyms_lookup_name(name);
+ if (addr)
+ addr = ppc_function_entry((void *)addr);
+#else
+ addr = kallsyms_lookup_name(name);
+#endif
+ return addr;
+}
+
+/*
+ * Some instruction encodings commonly used in dynamic ftracing
+ * and function live patching.
+ */
+
+/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+#define R2_STACK_OFFSET 24
+#else
+#define R2_STACK_OFFSET 40
+#endif
+
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
+
+/* usually preceded by a mflr r0 */
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
new file mode 100644
index 000000000..dda4091fd
--- /dev/null
+++ b/arch/powerpc/include/asm/compat.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_COMPAT_H
+#define _ASM_POWERPC_COMPAT_H
+#ifdef __KERNEL__
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
+#include <asm-generic/compat.h>
+
+#ifdef __BIG_ENDIAN__
+#define COMPAT_UTS_MACHINE "ppc\0\0"
+#else
+#define COMPAT_UTS_MACHINE "ppcle\0\0"
+#endif
+
+typedef s16 compat_nlink_t;
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid32_t st_uid;
+ __compat_gid32_t st_gid;
+ compat_dev_t st_rdev;
+ compat_off_t st_size;
+ compat_off_t st_blksize;
+ compat_off_t st_blocks;
+ old_time32_t st_atime;
+ u32 st_atime_nsec;
+ old_time32_t st_mtime;
+ u32 st_mtime_nsec;
+ old_time32_t st_ctime;
+ u32 st_ctime_nsec;
+ u32 __unused4[2];
+};
+
+/*
+ * ipc64_perm is actually 32/64-bit clean, but since the compat layer refers to
+ * it we may as well define it.
+ */
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid_t uid;
+ __compat_gid_t gid;
+ __compat_uid_t cuid;
+ __compat_gid_t cgid;
+ compat_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad2;
+	unsigned long __unused1;	/* yes, they really are 64-bit pads */
+ unsigned long __unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ unsigned int sem_otime_high;
+ unsigned int sem_otime;
+ unsigned int sem_ctime_high;
+ unsigned int sem_ctime;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ unsigned int msg_stime_high;
+ unsigned int msg_stime;
+ unsigned int msg_rtime_high;
+ unsigned int msg_rtime;
+ unsigned int msg_ctime_high;
+ unsigned int msg_ctime;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ unsigned int shm_atime_high;
+ unsigned int shm_atime;
+ unsigned int shm_dtime_high;
+ unsigned int shm_dtime;
+ unsigned int shm_ctime_high;
+ unsigned int shm_ctime;
+ unsigned int __unused4;
+ compat_size_t shm_segsz;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused5;
+ compat_ulong_t __unused6;
+};
+
+static inline int is_compat_task(void)
+{
+ return is_32bit_task();
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 000000000..4b63931c4
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+#define SCHEDULE_USER bl schedule_user
+#else
+#define SCHEDULE_USER bl schedule
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
new file mode 100644
index 000000000..fd2e166ea
--- /dev/null
+++ b/arch/powerpc/include/asm/copro.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2014 IBM Corp.
+ */
+
+#ifndef _ASM_POWERPC_COPRO_H
+#define _ASM_POWERPC_COPRO_H
+
+#include <linux/mm_types.h>
+
+struct copro_slb
+{
+ u64 esid, vsid;
+};
+
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, vm_fault_t *flt);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
+
+#ifdef CONFIG_PPC_COPRO_BASE
+void copro_flush_all_slbs(struct mm_struct *mm);
+#else
+static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
+#endif
+#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
new file mode 100644
index 000000000..ce483b0f8
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm.h
@@ -0,0 +1 @@
+#include <soc/fsl/cpm.h>
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
new file mode 100644
index 000000000..3bdd74739
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -0,0 +1,612 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MPC8xx Communication Processor Module.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This file contains structures and information for the communication
+ * processor channels. Some CPM control and status is available
+ * through the MPC8xx internal memory map. See immap.h for details.
+ * This file only contains what I need for the moment, not the total
+ * CPM capabilities. I (or someone else) will add definitions as they
+ * are needed. -- Dan
+ *
+ * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
+ * bytes of the DP RAM and relocates the I2C parameter area to the
+ * IDMA1 space. The remaining DP RAM is available for buffer descriptors
+ * or other use.
+ */
+#ifndef __CPM1__
+#define __CPM1__
+
+#include <linux/init.h>
+#include <asm/8xx_immap.h>
+#include <asm/ptrace.h>
+#include <asm/cpm.h>
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST ((ushort)0x8000)
+#define CPM_CR_OPCODE ((ushort)0x0f00)
+#define CPM_CR_CHAN ((ushort)0x00f0)
+#define CPM_CR_FLG ((ushort)0x0001)
+
+/* Channel numbers.
+*/
+#define CPM_CR_CH_SCC1 ((ushort)0x0000)
+#define CPM_CR_CH_I2C ((ushort)0x0001) /* I2C and IDMA1 */
+#define CPM_CR_CH_SCC2 ((ushort)0x0004)
+#define CPM_CR_CH_SPI ((ushort)0x0005) /* SPI / IDMA2 / Timers */
+#define CPM_CR_CH_TIMER CPM_CR_CH_SPI
+#define CPM_CR_CH_SCC3 ((ushort)0x0008)
+#define CPM_CR_CH_SMC1 ((ushort)0x0009) /* SMC1 / DSP1 */
+#define CPM_CR_CH_SCC4 ((ushort)0x000c)
+#define CPM_CR_CH_SMC2 ((ushort)0x000d) /* SMC2 / DSP2 */
+
+#define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4))
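+
+/*
+ * For example, mk_cr_cmd(CPM_CR_CH_SMC1, opcode) places the 4-bit opcode
+ * in bits 8-11 and the channel number in bits 4-7; the caller then ORs in
+ * CPM_CR_FLG before writing the command register, and the CP clears that
+ * flag once the command has completed.
+ */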
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
+#define cpm_dpram_phys cpm_muram_dma
+
+extern void cpm_setbrg(uint brg, uint rate);
+
+extern void __init cpm_load_patch(cpm8xx_t *cp);
+
+extern void cpm_reset(void);
+
+/* Parameter RAM offsets.
+*/
+#define PROFF_SCC1 ((uint)0x0000)
+#define PROFF_IIC ((uint)0x0080)
+#define PROFF_SCC2 ((uint)0x0100)
+#define PROFF_SPI ((uint)0x0180)
+#define PROFF_SCC3 ((uint)0x0200)
+#define PROFF_SMC1 ((uint)0x0280)
+#define PROFF_DSP1 ((uint)0x02c0)
+#define PROFF_SCC4 ((uint)0x0300)
+#define PROFF_SMC2 ((uint)0x0380)
+
+/* Define enough so I can at least use the serial port as a UART.
+ * The MBX uses SMC1 as the host serial port.
+ */
+typedef struct smc_uart {
+ ushort smc_rbase; /* Rx Buffer descriptor base address */
+ ushort smc_tbase; /* Tx Buffer descriptor base address */
+ u_char smc_rfcr; /* Rx function code */
+ u_char smc_tfcr; /* Tx function code */
+ ushort smc_mrblr; /* Max receive buffer length */
+ uint smc_rstate; /* Internal */
+ uint smc_idp; /* Internal */
+ ushort smc_rbptr; /* Internal */
+ ushort smc_ibc; /* Internal */
+ uint smc_rxtmp; /* Internal */
+ uint smc_tstate; /* Internal */
+ uint smc_tdp; /* Internal */
+ ushort smc_tbptr; /* Internal */
+ ushort smc_tbc; /* Internal */
+ uint smc_txtmp; /* Internal */
+ ushort smc_maxidl; /* Maximum idle characters */
+ ushort smc_tmpidl; /* Temporary idle counter */
+ ushort smc_brklen; /* Last received break length */
+ ushort smc_brkec; /* rcv'd break condition counter */
+ ushort smc_brkcr; /* xmt break count register */
+ ushort smc_rmask; /* Temporary bit mask */
+ char res1[8]; /* Reserved */
+ ushort smc_rpbase; /* Relocation pointer */
+} smc_uart_t;
+
+/* Function code bits.
+*/
+#define SMC_EB ((u_char)0x10) /* Set big endian byte order */
+
+/* SMC uart mode register.
+*/
+#define SMCMR_REN ((ushort)0x0001)
+#define SMCMR_TEN ((ushort)0x0002)
+#define SMCMR_DM ((ushort)0x000c)
+#define SMCMR_SM_GCI ((ushort)0x0000)
+#define SMCMR_SM_UART ((ushort)0x0020)
+#define SMCMR_SM_TRANS ((ushort)0x0030)
+#define SMCMR_SM_MASK ((ushort)0x0030)
+#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */
+#define SMCMR_REVD SMCMR_PM_EVEN
+#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */
+#define SMCMR_BS SMCMR_PEN
+#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */
+#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */
+#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK)
+
+/* SMC2 as Centronics parallel printer. It is half duplex, in that
+ * it can only receive or transmit. The parameter ram values for
+ * each direction are either unique or properly overlap, so we can
+ * include them in one structure.
+ */
+typedef struct smc_centronics {
+ ushort scent_rbase;
+ ushort scent_tbase;
+ u_char scent_cfcr;
+ u_char scent_smask;
+ ushort scent_mrblr;
+ uint scent_rstate;
+ uint scent_r_ptr;
+ ushort scent_rbptr;
+ ushort scent_r_cnt;
+ uint scent_rtemp;
+ uint scent_tstate;
+ uint scent_t_ptr;
+ ushort scent_tbptr;
+ ushort scent_t_cnt;
+ uint scent_ttemp;
+ ushort scent_max_sl;
+ ushort scent_sl_cnt;
+ ushort scent_character1;
+ ushort scent_character2;
+ ushort scent_character3;
+ ushort scent_character4;
+ ushort scent_character5;
+ ushort scent_character6;
+ ushort scent_character7;
+ ushort scent_character8;
+ ushort scent_rccm;
+ ushort scent_rccr;
+} smc_cent_t;
+
+/* Centronics Status Mask Register.
+*/
+#define SMC_CENT_F ((u_char)0x08)
+#define SMC_CENT_PE ((u_char)0x04)
+#define SMC_CENT_S ((u_char)0x02)
+
+/* SMC Event and Mask register.
+*/
+#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */
+#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */
+#define SMCM_TXE ((unsigned char)0x10) /* When in Transparent Mode */
+#define SMCM_BSY ((unsigned char)0x04)
+#define SMCM_TX ((unsigned char)0x02)
+#define SMCM_RX ((unsigned char)0x01)
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST ((uint)0x00020000)
+#define CPM_BRG_EN ((uint)0x00010000)
+#define CPM_BRG_EXTC_INT ((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK2 ((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK6 ((uint)0x00008000)
+#define CPM_BRG_ATB ((uint)0x00002000)
+#define CPM_BRG_CD_MASK ((uint)0x00001ffe)
+#define CPM_BRG_DIV16 ((uint)0x00000001)
+
+/* SI Clock Route Register
+*/
+#define SICR_RCLK_SCC1_BRG1 ((uint)0x00000000)
+#define SICR_TCLK_SCC1_BRG1 ((uint)0x00000000)
+#define SICR_RCLK_SCC2_BRG2 ((uint)0x00000800)
+#define SICR_TCLK_SCC2_BRG2 ((uint)0x00000100)
+#define SICR_RCLK_SCC3_BRG3 ((uint)0x00100000)
+#define SICR_TCLK_SCC3_BRG3 ((uint)0x00020000)
+#define SICR_RCLK_SCC4_BRG4 ((uint)0x18000000)
+#define SICR_TCLK_SCC4_BRG4 ((uint)0x03000000)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP ((uint)0x00040000)
+#define SCC_GSMRH_GDE ((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000)
+#define SCC_GSMRH_REVD ((uint)0x00002000)
+#define SCC_GSMRH_TRX ((uint)0x00001000)
+#define SCC_GSMRH_TTX ((uint)0x00000800)
+#define SCC_GSMRH_CDP ((uint)0x00000400)
+#define SCC_GSMRH_CTSP ((uint)0x00000200)
+#define SCC_GSMRH_CDS ((uint)0x00000100)
+#define SCC_GSMRH_CTSS ((uint)0x00000080)
+#define SCC_GSMRH_TFL ((uint)0x00000040)
+#define SCC_GSMRH_RFW ((uint)0x00000020)
+#define SCC_GSMRH_TXSY ((uint)0x00000010)
+#define SCC_GSMRH_SYNL16 ((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8 ((uint)0x00000008)
+#define SCC_GSMRH_SYNL4 ((uint)0x00000004)
+#define SCC_GSMRH_RTSM ((uint)0x00000002)
+#define SCC_GSMRH_RSYN ((uint)0x00000001)
+
+#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS ((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000)
+#define SCC_GSMRL_TCI ((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4 ((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14 ((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF ((uint)0x00000000)
+#define SCC_GSMRL_RINV ((uint)0x02000000)
+#define SCC_GSMRL_TINV ((uint)0x01000000)
+#define SCC_GSMRL_TPL_128 ((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64 ((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48 ((uint)0x00800000)
+#define SCC_GSMRL_TPL_32 ((uint)0x00600000)
+#define SCC_GSMRL_TPL_16 ((uint)0x00400000)
+#define SCC_GSMRL_TPL_8 ((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE ((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000)
+#define SCC_GSMRL_TPP_01 ((uint)0x00100000)
+#define SCC_GSMRL_TPP_10 ((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000)
+#define SCC_GSMRL_TEND ((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32 ((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16 ((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8 ((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16 ((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8 ((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000)
+#define SCC_GSMRL_ENR ((uint)0x00000020)
+#define SCC_GSMRL_ENT ((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c)
+#define SCC_GSMRL_MODE_QMC ((uint)0x0000000a)
+#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14 ((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART ((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000)
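+
+/* Usage sketch (illustrative, not a hardware definition): a typical GSMR_L
+ * value for UART operation selects the UART mode and 16x Tx/Rx clock
+ * dividers, enabling receiver and transmitter once the channel is set up:
+ *
+ *	uint gsmrl = SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 |
+ *		     SCC_GSMRL_RDCR_16 | SCC_GSMRL_ENR | SCC_GSMRL_ENT;
+ */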
+
+#define SCC_TODR_TOD ((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define SCCM_TXE ((unsigned char)0x10)
+#define SCCM_BSY ((unsigned char)0x04)
+#define SCCM_TX ((unsigned char)0x02)
+#define SCCM_RX ((unsigned char)0x01)
+
+typedef struct scc_param {
+ ushort scc_rbase; /* Rx Buffer descriptor base address */
+ ushort scc_tbase; /* Tx Buffer descriptor base address */
+ u_char scc_rfcr; /* Rx function code */
+ u_char scc_tfcr; /* Tx function code */
+ ushort scc_mrblr; /* Max receive buffer length */
+ uint scc_rstate; /* Internal */
+ uint scc_idp; /* Internal */
+ ushort scc_rbptr; /* Internal */
+ ushort scc_ibc; /* Internal */
+ uint scc_rxtmp; /* Internal */
+ uint scc_tstate; /* Internal */
+ uint scc_tdp; /* Internal */
+ ushort scc_tbptr; /* Internal */
+ ushort scc_tbc; /* Internal */
+ uint scc_txtmp; /* Internal */
+ uint scc_rcrc; /* Internal */
+ uint scc_tcrc; /* Internal */
+} sccp_t;
+
+/* Function code bits.
+*/
+#define SCC_EB ((u_char)0x10) /* Set big endian byte order */
+
+/* CPM Ethernet through SCCx.
+ */
+typedef struct scc_enet {
+ sccp_t sen_genscc;
+ uint sen_cpres; /* Preset CRC */
+ uint sen_cmask; /* Constant mask for CRC */
+ uint sen_crcec; /* CRC Error counter */
+ uint sen_alec; /* alignment error counter */
+ uint sen_disfc; /* discard frame counter */
+ ushort sen_pads; /* Tx short frame pad character */
+ ushort sen_retlim; /* Retry limit threshold */
+ ushort sen_retcnt; /* Retry limit counter */
+ ushort sen_maxflr; /* maximum frame length register */
+ ushort sen_minflr; /* minimum frame length register */
+ ushort sen_maxd1; /* maximum DMA1 length */
+ ushort sen_maxd2; /* maximum DMA2 length */
+ ushort sen_maxd; /* Rx max DMA */
+ ushort sen_dmacnt; /* Rx DMA counter */
+ ushort sen_maxb; /* Max BD byte count */
+ ushort sen_gaddr1; /* Group address filter */
+ ushort sen_gaddr2;
+ ushort sen_gaddr3;
+ ushort sen_gaddr4;
+ uint sen_tbuf0data0; /* Save area 0 - current frame */
+ uint sen_tbuf0data1; /* Save area 1 - current frame */
+ uint sen_tbuf0rba; /* Internal */
+ uint sen_tbuf0crc; /* Internal */
+ ushort sen_tbuf0bcnt; /* Internal */
+ ushort sen_paddrh; /* physical address (MSB) */
+ ushort sen_paddrm;
+ ushort sen_paddrl; /* physical address (LSB) */
+ ushort sen_pper; /* persistence */
+ ushort sen_rfbdptr; /* Rx first BD pointer */
+ ushort sen_tfbdptr; /* Tx first BD pointer */
+ ushort sen_tlbdptr; /* Tx last BD pointer */
+ uint sen_tbuf1data0; /* Save area 0 - current frame */
+ uint sen_tbuf1data1; /* Save area 1 - current frame */
+ uint sen_tbuf1rba; /* Internal */
+ uint sen_tbuf1crc; /* Internal */
+ ushort sen_tbuf1bcnt; /* Internal */
+ ushort sen_txlen; /* Tx Frame length counter */
+ ushort sen_iaddr1; /* Individual address filter */
+ ushort sen_iaddr2;
+ ushort sen_iaddr3;
+ ushort sen_iaddr4;
+ ushort sen_boffcnt; /* Backoff counter */
+
+ /* NOTE: Some versions of the manual have the following items
+ * incorrectly documented. Below is the proper order.
+ */
+ ushort sen_taddrh; /* temp address (MSB) */
+ ushort sen_taddrm;
+ ushort sen_taddrl; /* temp address (LSB) */
+} scc_enet_t;
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */
+#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */
+#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */
+#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */
+#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */
+#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */
+#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */
+#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */
+#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */
+#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */
+#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */
+#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */
+#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */
+#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+ sccp_t scc_genscc;
+ char res1[8]; /* Reserved */
+ ushort scc_maxidl; /* Maximum idle chars */
+ ushort scc_idlc; /* temp idle counter */
+ ushort scc_brkcr; /* Break count register */
+ ushort scc_parec; /* receive parity error counter */
+ ushort scc_frmec; /* receive framing error counter */
+ ushort scc_nosec; /* receive noise counter */
+ ushort scc_brkec; /* receive break condition counter */
+ ushort scc_brkln; /* last received break length */
+ ushort scc_uaddr1; /* UART address character 1 */
+ ushort scc_uaddr2; /* UART address character 2 */
+ ushort scc_rtemp; /* Temp storage */
+ ushort scc_toseq; /* Transmit out of sequence char */
+ ushort scc_char1; /* control character 1 */
+ ushort scc_char2; /* control character 2 */
+ ushort scc_char3; /* control character 3 */
+ ushort scc_char4; /* control character 4 */
+ ushort scc_char5; /* control character 5 */
+ ushort scc_char6; /* control character 6 */
+ ushort scc_char7; /* control character 7 */
+ ushort scc_char8; /* control character 8 */
+ ushort scc_rccm; /* receive control character mask */
+ ushort scc_rccr; /* receive control character register */
+ ushort scc_rlbc; /* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR ((ushort)0x1000)
+#define UART_SCCM_GLT ((ushort)0x0800)
+#define UART_SCCM_AB ((ushort)0x0200)
+#define UART_SCCM_IDL ((ushort)0x0100)
+#define UART_SCCM_GRA ((ushort)0x0080)
+#define UART_SCCM_BRKE ((ushort)0x0040)
+#define UART_SCCM_BRKS ((ushort)0x0020)
+#define UART_SCCM_CCR ((ushort)0x0008)
+#define UART_SCCM_BSY ((ushort)0x0004)
+#define UART_SCCM_TX ((ushort)0x0002)
+#define UART_SCCM_RX ((ushort)0x0001)
+
+/* The SCC PSMR when used as a UART.
+*/
+#define SCU_PSMR_FLC ((ushort)0x8000)
+#define SCU_PSMR_SL ((ushort)0x4000)
+#define SCU_PSMR_CL ((ushort)0x3000)
+#define SCU_PSMR_UM ((ushort)0x0c00)
+#define SCU_PSMR_FRZ ((ushort)0x0200)
+#define SCU_PSMR_RZS ((ushort)0x0100)
+#define SCU_PSMR_SYN ((ushort)0x0080)
+#define SCU_PSMR_DRT ((ushort)0x0040)
+#define SCU_PSMR_PEN ((ushort)0x0010)
+#define SCU_PSMR_RPM ((ushort)0x000c)
+#define SCU_PSMR_REVP ((ushort)0x0008)
+#define SCU_PSMR_TPM ((ushort)0x0003)
+#define SCU_PSMR_TEVP ((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+ sccp_t st_genscc;
+ uint st_cpres; /* Preset CRC */
+ uint st_cmask; /* Constant mask for CRC */
+} scc_trans_t;
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+ ushort iic_rbase; /* Rx Buffer descriptor base address */
+ ushort iic_tbase; /* Tx Buffer descriptor base address */
+ u_char iic_rfcr; /* Rx function code */
+ u_char iic_tfcr; /* Tx function code */
+ ushort iic_mrblr; /* Max receive buffer length */
+ uint iic_rstate; /* Internal */
+ uint iic_rdp; /* Internal */
+ ushort iic_rbptr; /* Internal */
+ ushort iic_rbc; /* Internal */
+ uint iic_rxtmp; /* Internal */
+ uint iic_tstate; /* Internal */
+ uint iic_tdp; /* Internal */
+ ushort iic_tbptr; /* Internal */
+ ushort iic_tbc; /* Internal */
+ uint iic_txtmp; /* Internal */
+ char res1[4]; /* Reserved */
+ ushort iic_rpbase; /* Relocation pointer */
+ char res2[2]; /* Reserved */
+} iic_t;
+
+/*
+ * RISC Controller Configuration Register definitions
+ */
+#define RCCR_TIME 0x8000 /* RISC Timer Enable */
+#define RCCR_TIMEP(t) (((t) & 0x3F)<<8) /* RISC Timer Period */
+#define RCCR_TIME_MASK 0x00FF /* not RISC Timer related bits */
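+
+/* Usage sketch (illustrative): enable the RISC timer with an example
+ * period of 32; RCCR_TIME_MASK covers the remaining, non-timer bits of
+ * the register:
+ *
+ *	ushort rccr = RCCR_TIME | RCCR_TIMEP(32);
+ */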
+
+/* RISC Timer Parameter RAM offset */
+#define PROFF_RTMR ((uint)0x01B0)
+
+typedef struct risc_timer_pram {
+ unsigned short tm_base; /* RISC Timer Table Base Address */
+ unsigned short tm_ptr; /* RISC Timer Table Pointer (internal) */
+ unsigned short r_tmr; /* RISC Timer Mode Register */
+ unsigned short r_tmv; /* RISC Timer Valid Register */
+ unsigned long tm_cmd; /* RISC Timer Command Register */
+ unsigned long tm_cnt; /* RISC Timer Internal Count */
+} rt_pram_t;
+
+/* Bits in RISC Timer Command Register */
+#define TM_CMD_VALID 0x80000000 /* Valid - Enables the timer */
+#define TM_CMD_RESTART 0x40000000 /* Restart - for automatic restart */
+#define TM_CMD_PWM 0x20000000 /* Run in Pulse Width Modulation Mode */
+#define TM_CMD_NUM(n) (((n)&0xF)<<16) /* Timer Number */
+#define TM_CMD_PERIOD(p) ((p)&0xFFFF) /* Timer Period */
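+
+/* Usage sketch (illustrative, arbitrary example values): compose a RISC
+ * timer command word that enables timer 3 in auto-restart mode with a
+ * period of 1000:
+ *
+ *	uint cmd = TM_CMD_VALID | TM_CMD_RESTART | TM_CMD_NUM(3) |
+ *		   TM_CMD_PERIOD(1000);
+ */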
+
+/* CPM interrupts. There are nearly 32 interrupts generated by CPM
+ * channels or devices. All of these are presented to the PPC core
+ * as a single interrupt. The CPM interrupt handler dispatches its
+ * own handlers, in a similar fashion to the PPC core handler. We
+ * use the table as defined in the manuals (i.e. no special high
+ * priority and SCC1 == SCCa, etc...).
+ */
+#define CPMVEC_NR 32
+#define CPMVEC_PIO_PC15 ((ushort)0x1f)
+#define CPMVEC_SCC1 ((ushort)0x1e)
+#define CPMVEC_SCC2 ((ushort)0x1d)
+#define CPMVEC_SCC3 ((ushort)0x1c)
+#define CPMVEC_SCC4 ((ushort)0x1b)
+#define CPMVEC_PIO_PC14 ((ushort)0x1a)
+#define CPMVEC_TIMER1 ((ushort)0x19)
+#define CPMVEC_PIO_PC13 ((ushort)0x18)
+#define CPMVEC_PIO_PC12 ((ushort)0x17)
+#define CPMVEC_SDMA_CB_ERR ((ushort)0x16)
+#define CPMVEC_IDMA1 ((ushort)0x15)
+#define CPMVEC_IDMA2 ((ushort)0x14)
+#define CPMVEC_TIMER2 ((ushort)0x12)
+#define CPMVEC_RISCTIMER ((ushort)0x11)
+#define CPMVEC_I2C ((ushort)0x10)
+#define CPMVEC_PIO_PC11 ((ushort)0x0f)
+#define CPMVEC_PIO_PC10 ((ushort)0x0e)
+#define CPMVEC_TIMER3 ((ushort)0x0c)
+#define CPMVEC_PIO_PC9 ((ushort)0x0b)
+#define CPMVEC_PIO_PC8 ((ushort)0x0a)
+#define CPMVEC_PIO_PC7 ((ushort)0x09)
+#define CPMVEC_TIMER4 ((ushort)0x07)
+#define CPMVEC_PIO_PC6 ((ushort)0x06)
+#define CPMVEC_SPI ((ushort)0x05)
+#define CPMVEC_SMC1 ((ushort)0x04)
+#define CPMVEC_SMC2 ((ushort)0x03)
+#define CPMVEC_PIO_PC5 ((ushort)0x02)
+#define CPMVEC_PIO_PC4 ((ushort)0x01)
+#define CPMVEC_ERROR ((ushort)0x00)
+
+/* CPM interrupt configuration vector.
+*/
+#define CICR_SCD_SCC4 ((uint)0x00c00000) /* SCC4 @ SCCd */
+#define CICR_SCC_SCC3 ((uint)0x00200000) /* SCC3 @ SCCc */
+#define CICR_SCB_SCC2 ((uint)0x00040000) /* SCC2 @ SCCb */
+#define CICR_SCA_SCC1 ((uint)0x00000000) /* SCC1 @ SCCa */
+#define CICR_IRL_MASK ((uint)0x0000e000) /* Core interrupt */
+#define CICR_HP_MASK ((uint)0x00001f00) /* Hi-pri int. */
+#define CICR_IEN ((uint)0x00000080) /* Int. enable */
+#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
+
+#define CPM_PIN_INPUT 0
+#define CPM_PIN_OUTPUT 1
+#define CPM_PIN_PRIMARY 0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO 4
+#define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE 16
+#define CPM_PIN_ANYEDGE 0
+
+enum cpm_port {
+ CPM_PORTA,
+ CPM_PORTB,
+ CPM_PORTC,
+ CPM_PORTD,
+ CPM_PORTE,
+};
+
+void cpm1_set_pin(enum cpm_port port, int pin, int flags);
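+
+/* Usage sketch (illustrative; the pin number is an arbitrary example):
+ * board code ORs the flag values above to configure a pin, e.g. a
+ * secondary-function output on port A pin 5:
+ *
+ *	cpm1_set_pin(CPM_PORTA, 5, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY);
+ */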
+
+enum cpm_clk_dir {
+ CPM_CLK_RX,
+ CPM_CLK_TX,
+ CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+ CPM_CLK_SCC1,
+ CPM_CLK_SCC2,
+ CPM_CLK_SCC3,
+ CPM_CLK_SCC4,
+ CPM_CLK_SMC1,
+ CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+ CPM_BRG1, /* Baud Rate Generator 1 */
+ CPM_BRG2, /* Baud Rate Generator 2 */
+ CPM_BRG3, /* Baud Rate Generator 3 */
+ CPM_BRG4, /* Baud Rate Generator 4 */
+ CPM_CLK1, /* Clock 1 */
+ CPM_CLK2, /* Clock 2 */
+ CPM_CLK3, /* Clock 3 */
+ CPM_CLK4, /* Clock 4 */
+ CPM_CLK5, /* Clock 5 */
+ CPM_CLK6, /* Clock 6 */
+ CPM_CLK7, /* Clock 7 */
+ CPM_CLK8, /* Clock 8 */
+};
+
+int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);
+int cpm1_gpiochip_add16(struct device *dev);
+int cpm1_gpiochip_add32(struct device *dev);
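+
+/* Usage sketch (illustrative): route baud rate generator 1 to SMC1 for
+ * both receive and transmit:
+ *
+ *	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
+ */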
+
+#endif /* __CPM1__ */
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
new file mode 100644
index 000000000..9ee192a6c
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -0,0 +1,1149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Communication Processor Module v2.
+ *
+ * This file contains structures and information for the communication
+ * processor channels found in the dual port RAM or parameter RAM.
+ * All CPM control and status is available through the CPM2 internal
+ * memory map. See immap_cpm2.h for details.
+ */
+#ifdef __KERNEL__
+#ifndef __CPM2__
+#define __CPM2__
+
+#include <asm/immap_cpm2.h>
+#include <asm/cpm.h>
+#include <sysdev/fsl_soc.h>
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST ((uint)0x80000000)
+#define CPM_CR_PAGE ((uint)0x7c000000)
+#define CPM_CR_SBLOCK ((uint)0x03e00000)
+#define CPM_CR_FLG ((uint)0x00010000)
+#define CPM_CR_MCN ((uint)0x00003fc0)
+#define CPM_CR_OPCODE ((uint)0x0000000f)
+
+/* Device sub-block and page codes.
+*/
+#define CPM_CR_SCC1_SBLOCK (0x04)
+#define CPM_CR_SCC2_SBLOCK (0x05)
+#define CPM_CR_SCC3_SBLOCK (0x06)
+#define CPM_CR_SCC4_SBLOCK (0x07)
+#define CPM_CR_SMC1_SBLOCK (0x08)
+#define CPM_CR_SMC2_SBLOCK (0x09)
+#define CPM_CR_SPI_SBLOCK (0x0a)
+#define CPM_CR_I2C_SBLOCK (0x0b)
+#define CPM_CR_TIMER_SBLOCK (0x0f)
+#define CPM_CR_RAND_SBLOCK (0x0e)
+#define CPM_CR_FCC1_SBLOCK (0x10)
+#define CPM_CR_FCC2_SBLOCK (0x11)
+#define CPM_CR_FCC3_SBLOCK (0x12)
+#define CPM_CR_IDMA1_SBLOCK (0x14)
+#define CPM_CR_IDMA2_SBLOCK (0x15)
+#define CPM_CR_IDMA3_SBLOCK (0x16)
+#define CPM_CR_IDMA4_SBLOCK (0x17)
+#define CPM_CR_MCC1_SBLOCK (0x1c)
+
+#define CPM_CR_FCC_SBLOCK(x) ((x) + 0x10)
+
+#define CPM_CR_SCC1_PAGE (0x00)
+#define CPM_CR_SCC2_PAGE (0x01)
+#define CPM_CR_SCC3_PAGE (0x02)
+#define CPM_CR_SCC4_PAGE (0x03)
+#define CPM_CR_SMC1_PAGE (0x07)
+#define CPM_CR_SMC2_PAGE (0x08)
+#define CPM_CR_SPI_PAGE (0x09)
+#define CPM_CR_I2C_PAGE (0x0a)
+#define CPM_CR_TIMER_PAGE (0x0a)
+#define CPM_CR_RAND_PAGE (0x0a)
+#define CPM_CR_FCC1_PAGE (0x04)
+#define CPM_CR_FCC2_PAGE (0x05)
+#define CPM_CR_FCC3_PAGE (0x06)
+#define CPM_CR_IDMA1_PAGE (0x07)
+#define CPM_CR_IDMA2_PAGE (0x08)
+#define CPM_CR_IDMA3_PAGE (0x09)
+#define CPM_CR_IDMA4_PAGE (0x0a)
+#define CPM_CR_MCC1_PAGE (0x07)
+#define CPM_CR_MCC2_PAGE (0x08)
+
+#define CPM_CR_FCC_PAGE(x) ((x) + 0x04)
+
+/* CPM2-specific opcodes (see cpm.h for common opcodes)
+*/
+#define CPM_CR_START_IDMA ((ushort)0x0009)
+
+#define mk_cr_cmd(PG, SBC, MCN, OP) \
+ (((PG) << 26) | ((SBC) << 21) | ((MCN) << 6) | (OP))
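+
+/* Usage sketch (illustrative): build a command register value for SCC1;
+ * CPM_CR_INIT_TRX is assumed to be one of the common opcodes from
+ * <asm/cpm.h>, and CPM_CR_FLG marks the command as pending:
+ *
+ *	uint cmd = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0,
+ *			     CPM_CR_INIT_TRX) | CPM_CR_FLG;
+ */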
+
+/* The number of pages of host memory we allocate for CPM. This is
+ * done early in kernel initialization to get physically contiguous
+ * pages.
+ */
+#define NUM_CPM_HOST_PAGES 2
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
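+
+/* Usage sketch (illustrative): per-channel parameter RAM is carved out of
+ * the dual-port RAM through the muram allocator behind these aliases,
+ * assuming the generic cpm_muram_alloc(size, align) interface:
+ *
+ *	s32 offset = cpm_dpalloc(PROFF_SMC_SIZE, 64);
+ */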
+
+extern void cpm2_reset(void);
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST ((uint)0x00020000)
+#define CPM_BRG_EN ((uint)0x00010000)
+#define CPM_BRG_EXTC_INT ((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK3_9 ((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK5_15 ((uint)0x00008000)
+#define CPM_BRG_ATB ((uint)0x00002000)
+#define CPM_BRG_CD_MASK ((uint)0x00001ffe)
+#define CPM_BRG_DIV16 ((uint)0x00000001)
+
+#define CPM2_BRG_INT_CLK (get_brgfreq())
+#define CPM2_BRG_UART_CLK (CPM2_BRG_INT_CLK/16)
+
+extern void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src);
+
+/* This function is used by UARTs, or anything else that uses a 16x
+ * oversampled clock.
+ */
+static inline void cpm_setbrg(uint brg, uint rate)
+{
+ __cpm2_setbrg(brg, rate, CPM2_BRG_UART_CLK, 0, CPM_BRG_EXTC_INT);
+}
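+
+/* Usage sketch (illustrative; assumes the 0-based brg index convention
+ * used by callers, so index 0 selects BRG1): program BRG1 for a 115200
+ * baud UART:
+ *
+ *	cpm_setbrg(0, 115200);
+ */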
+
+/* This function is used to set high speed synchronous baud rate
+ * clocks.
+ */
+static inline void cpm2_fastbrg(uint brg, uint rate, int div16)
+{
+ __cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT);
+}
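+
+/* Usage sketch (illustrative, arbitrary example values): drive BRG3
+ * (index 2) at 8 MHz without the divide-by-16 prescaler:
+ *
+ *	cpm2_fastbrg(2, 8000000, 0);
+ */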
+
+/* Parameter RAM offsets from the base.
+*/
+#define PROFF_SCC1 ((uint)0x8000)
+#define PROFF_SCC2 ((uint)0x8100)
+#define PROFF_SCC3 ((uint)0x8200)
+#define PROFF_SCC4 ((uint)0x8300)
+#define PROFF_FCC1 ((uint)0x8400)
+#define PROFF_FCC2 ((uint)0x8500)
+#define PROFF_FCC3 ((uint)0x8600)
+#define PROFF_MCC1 ((uint)0x8700)
+#define PROFF_SMC1_BASE ((uint)0x87fc)
+#define PROFF_IDMA1_BASE ((uint)0x87fe)
+#define PROFF_MCC2 ((uint)0x8800)
+#define PROFF_SMC2_BASE ((uint)0x88fc)
+#define PROFF_IDMA2_BASE ((uint)0x88fe)
+#define PROFF_SPI_BASE ((uint)0x89fc)
+#define PROFF_IDMA3_BASE ((uint)0x89fe)
+#define PROFF_TIMERS ((uint)0x8ae0)
+#define PROFF_REVNUM ((uint)0x8af0)
+#define PROFF_RAND ((uint)0x8af8)
+#define PROFF_I2C_BASE ((uint)0x8afc)
+#define PROFF_IDMA4_BASE ((uint)0x8afe)
+
+#define PROFF_SCC_SIZE ((uint)0x100)
+#define PROFF_FCC_SIZE ((uint)0x100)
+#define PROFF_SMC_SIZE ((uint)64)
+
+/* The SMCs are relocated to any of the first eight DPRAM pages.
+ * We will fix these at the first locations of DPRAM, until we
+ * get some microcode patches :-).
+ * The parameter ram space for the SMCs is fifty-some bytes, and
+ * they are required to start on a 64 byte boundary.
+ */
+#define PROFF_SMC1 (0)
+#define PROFF_SMC2 (64)
+
+
+/* Define enough so I can at least use the serial port as a UART.
+ */
+typedef struct smc_uart {
+ ushort smc_rbase; /* Rx Buffer descriptor base address */
+ ushort smc_tbase; /* Tx Buffer descriptor base address */
+ u_char smc_rfcr; /* Rx function code */
+ u_char smc_tfcr; /* Tx function code */
+ ushort smc_mrblr; /* Max receive buffer length */
+ uint smc_rstate; /* Internal */
+ uint smc_idp; /* Internal */
+ ushort smc_rbptr; /* Internal */
+ ushort smc_ibc; /* Internal */
+ uint smc_rxtmp; /* Internal */
+ uint smc_tstate; /* Internal */
+ uint smc_tdp; /* Internal */
+ ushort smc_tbptr; /* Internal */
+ ushort smc_tbc; /* Internal */
+ uint smc_txtmp; /* Internal */
+ ushort smc_maxidl; /* Maximum idle characters */
+ ushort smc_tmpidl; /* Temporary idle counter */
+ ushort smc_brklen; /* Last received break length */
+ ushort smc_brkec; /* rcv'd break condition counter */
+ ushort smc_brkcr; /* xmt break count register */
+ ushort smc_rmask; /* Temporary bit mask */
+ uint smc_stmp; /* SDMA Temp */
+} smc_uart_t;
+
+/* SMC uart mode register (Internal memory map).
+*/
+#define SMCMR_REN ((ushort)0x0001)
+#define SMCMR_TEN ((ushort)0x0002)
+#define SMCMR_DM ((ushort)0x000c)
+#define SMCMR_SM_GCI ((ushort)0x0000)
+#define SMCMR_SM_UART ((ushort)0x0020)
+#define SMCMR_SM_TRANS ((ushort)0x0030)
+#define SMCMR_SM_MASK ((ushort)0x0030)
+#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */
+#define SMCMR_REVD SMCMR_PM_EVEN
+#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */
+#define SMCMR_BS SMCMR_PEN
+#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */
+#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */
+#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK)
+
+/* SMC Event and Mask register.
+*/
+#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */
+#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */
+#define SMCM_TXE ((unsigned char)0x10)
+#define SMCM_BSY ((unsigned char)0x04)
+#define SMCM_TX ((unsigned char)0x02)
+#define SMCM_RX ((unsigned char)0x01)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP ((uint)0x00040000)
+#define SCC_GSMRH_GDE ((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000)
+#define SCC_GSMRH_REVD ((uint)0x00002000)
+#define SCC_GSMRH_TRX ((uint)0x00001000)
+#define SCC_GSMRH_TTX ((uint)0x00000800)
+#define SCC_GSMRH_CDP ((uint)0x00000400)
+#define SCC_GSMRH_CTSP ((uint)0x00000200)
+#define SCC_GSMRH_CDS ((uint)0x00000100)
+#define SCC_GSMRH_CTSS ((uint)0x00000080)
+#define SCC_GSMRH_TFL ((uint)0x00000040)
+#define SCC_GSMRH_RFW ((uint)0x00000020)
+#define SCC_GSMRH_TXSY ((uint)0x00000010)
+#define SCC_GSMRH_SYNL16 ((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8 ((uint)0x00000008)
+#define SCC_GSMRH_SYNL4 ((uint)0x00000004)
+#define SCC_GSMRH_RTSM ((uint)0x00000002)
+#define SCC_GSMRH_RSYN ((uint)0x00000001)
+
+#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS ((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000)
+#define SCC_GSMRL_TCI ((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4 ((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14 ((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF ((uint)0x00000000)
+#define SCC_GSMRL_RINV ((uint)0x02000000)
+#define SCC_GSMRL_TINV ((uint)0x01000000)
+#define SCC_GSMRL_TPL_128 ((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64 ((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48 ((uint)0x00800000)
+#define SCC_GSMRL_TPL_32 ((uint)0x00600000)
+#define SCC_GSMRL_TPL_16 ((uint)0x00400000)
+#define SCC_GSMRL_TPL_8 ((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE ((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000)
+#define SCC_GSMRL_TPP_01 ((uint)0x00100000)
+#define SCC_GSMRL_TPP_10 ((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000)
+#define SCC_GSMRL_TEND ((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32 ((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16 ((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8 ((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16 ((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8 ((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000)
+#define SCC_GSMRL_ENR ((uint)0x00000020)
+#define SCC_GSMRL_ENT ((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c)
+#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14 ((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART ((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000)
+
+#define SCC_TODR_TOD ((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define SCCM_TXE ((unsigned char)0x10)
+#define SCCM_BSY ((unsigned char)0x04)
+#define SCCM_TX ((unsigned char)0x02)
+#define SCCM_RX ((unsigned char)0x01)
+
+typedef struct scc_param {
+ ushort scc_rbase; /* Rx Buffer descriptor base address */
+ ushort scc_tbase; /* Tx Buffer descriptor base address */
+ u_char scc_rfcr; /* Rx function code */
+ u_char scc_tfcr; /* Tx function code */
+ ushort scc_mrblr; /* Max receive buffer length */
+ uint scc_rstate; /* Internal */
+ uint scc_idp; /* Internal */
+ ushort scc_rbptr; /* Internal */
+ ushort scc_ibc; /* Internal */
+ uint scc_rxtmp; /* Internal */
+ uint scc_tstate; /* Internal */
+ uint scc_tdp; /* Internal */
+ ushort scc_tbptr; /* Internal */
+ ushort scc_tbc; /* Internal */
+ uint scc_txtmp; /* Internal */
+ uint scc_rcrc; /* Internal */
+ uint scc_tcrc; /* Internal */
+} sccp_t;
+
+/* Function code bits.
+*/
+#define SCC_EB ((u_char) 0x10) /* Set big endian byte order */
+#define SCC_GBL ((u_char) 0x20) /* Snooping enabled */
+
+/* CPM Ethernet through SCC1.
+ */
+typedef struct scc_enet {
+ sccp_t sen_genscc;
+ uint sen_cpres; /* Preset CRC */
+ uint sen_cmask; /* Constant mask for CRC */
+ uint sen_crcec; /* CRC Error counter */
+ uint sen_alec; /* alignment error counter */
+ uint sen_disfc; /* discard frame counter */
+ ushort sen_pads; /* Tx short frame pad character */
+ ushort sen_retlim; /* Retry limit threshold */
+ ushort sen_retcnt; /* Retry limit counter */
+ ushort sen_maxflr; /* maximum frame length register */
+ ushort sen_minflr; /* minimum frame length register */
+ ushort sen_maxd1; /* maximum DMA1 length */
+ ushort sen_maxd2; /* maximum DMA2 length */
+ ushort sen_maxd; /* Rx max DMA */
+ ushort sen_dmacnt; /* Rx DMA counter */
+ ushort sen_maxb; /* Max BD byte count */
+ ushort sen_gaddr1; /* Group address filter */
+ ushort sen_gaddr2;
+ ushort sen_gaddr3;
+ ushort sen_gaddr4;
+ uint sen_tbuf0data0; /* Save area 0 - current frame */
+ uint sen_tbuf0data1; /* Save area 1 - current frame */
+ uint sen_tbuf0rba; /* Internal */
+ uint sen_tbuf0crc; /* Internal */
+ ushort sen_tbuf0bcnt; /* Internal */
+ ushort sen_paddrh; /* physical address (MSB) */
+ ushort sen_paddrm;
+ ushort sen_paddrl; /* physical address (LSB) */
+ ushort sen_pper; /* persistence */
+ ushort sen_rfbdptr; /* Rx first BD pointer */
+ ushort sen_tfbdptr; /* Tx first BD pointer */
+ ushort sen_tlbdptr; /* Tx last BD pointer */
+ uint sen_tbuf1data0; /* Save area 0 - current frame */
+ uint sen_tbuf1data1; /* Save area 1 - current frame */
+ uint sen_tbuf1rba; /* Internal */
+ uint sen_tbuf1crc; /* Internal */
+ ushort sen_tbuf1bcnt; /* Internal */
+ ushort sen_txlen; /* Tx Frame length counter */
+ ushort sen_iaddr1; /* Individual address filter */
+ ushort sen_iaddr2;
+ ushort sen_iaddr3;
+ ushort sen_iaddr4;
+ ushort sen_boffcnt; /* Backoff counter */
+
+ /* NOTE: Some versions of the manual have the following items
+ * incorrectly documented. Below is the proper order.
+ */
+ ushort sen_taddrh; /* temp address (MSB) */
+ ushort sen_taddrm;
+ ushort sen_taddrl; /* temp address (LSB) */
+} scc_enet_t;
+
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */
+#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */
+#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */
+#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */
+#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */
+#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */
+#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */
+#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */
+#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */
+#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */
+#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */
+#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */
+#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */
+#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+ sccp_t scc_genscc;
+ uint scc_res1; /* Reserved */
+ uint scc_res2; /* Reserved */
+ ushort scc_maxidl; /* Maximum idle chars */
+ ushort scc_idlc; /* temp idle counter */
+ ushort scc_brkcr; /* Break count register */
+ ushort scc_parec; /* receive parity error counter */
+ ushort scc_frmec; /* receive framing error counter */
+ ushort scc_nosec; /* receive noise counter */
+ ushort scc_brkec; /* receive break condition counter */
+ ushort scc_brkln; /* last received break length */
+ ushort scc_uaddr1; /* UART address character 1 */
+ ushort scc_uaddr2; /* UART address character 2 */
+ ushort scc_rtemp; /* Temp storage */
+ ushort scc_toseq; /* Transmit out of sequence char */
+ ushort scc_char1; /* control character 1 */
+ ushort scc_char2; /* control character 2 */
+ ushort scc_char3; /* control character 3 */
+ ushort scc_char4; /* control character 4 */
+ ushort scc_char5; /* control character 5 */
+ ushort scc_char6; /* control character 6 */
+ ushort scc_char7; /* control character 7 */
+ ushort scc_char8; /* control character 8 */
+ ushort scc_rccm; /* receive control character mask */
+ ushort scc_rccr; /* receive control character register */
+ ushort scc_rlbc; /* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR ((ushort)0x1000)
+#define UART_SCCM_GLT ((ushort)0x0800)
+#define UART_SCCM_AB ((ushort)0x0200)
+#define UART_SCCM_IDL ((ushort)0x0100)
+#define UART_SCCM_GRA ((ushort)0x0080)
+#define UART_SCCM_BRKE ((ushort)0x0040)
+#define UART_SCCM_BRKS ((ushort)0x0020)
+#define UART_SCCM_CCR ((ushort)0x0008)
+#define UART_SCCM_BSY ((ushort)0x0004)
+#define UART_SCCM_TX ((ushort)0x0002)
+#define UART_SCCM_RX ((ushort)0x0001)
+
+/* The SCC PSMR when used as a UART.
+*/
+#define SCU_PSMR_FLC ((ushort)0x8000)
+#define SCU_PSMR_SL ((ushort)0x4000)
+#define SCU_PSMR_CL ((ushort)0x3000)
+#define SCU_PSMR_UM ((ushort)0x0c00)
+#define SCU_PSMR_FRZ ((ushort)0x0200)
+#define SCU_PSMR_RZS ((ushort)0x0100)
+#define SCU_PSMR_SYN ((ushort)0x0080)
+#define SCU_PSMR_DRT ((ushort)0x0040)
+#define SCU_PSMR_PEN ((ushort)0x0010)
+#define SCU_PSMR_RPM ((ushort)0x000c)
+#define SCU_PSMR_REVP ((ushort)0x0008)
+#define SCU_PSMR_TPM ((ushort)0x0003)
+#define SCU_PSMR_TEVP ((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+ sccp_t st_genscc;
+ uint st_cpres; /* Preset CRC */
+ uint st_cmask; /* Constant mask for CRC */
+} scc_trans_t;
+
+/* FCCs.
+*/
+#define FCC_GFMR_DIAG_NORM ((uint)0x00000000)
+#define FCC_GFMR_DIAG_LE ((uint)0x40000000)
+#define FCC_GFMR_DIAG_AE ((uint)0x80000000)
+#define FCC_GFMR_DIAG_ALE ((uint)0xc0000000)
+#define FCC_GFMR_TCI ((uint)0x20000000)
+#define FCC_GFMR_TRX ((uint)0x10000000)
+#define FCC_GFMR_TTX ((uint)0x08000000)
+#define FCC_GFMR_CDP ((uint)0x04000000)
+#define FCC_GFMR_CTSP ((uint)0x02000000)
+#define FCC_GFMR_CDS ((uint)0x01000000)
+#define FCC_GFMR_CTSS ((uint)0x00800000)
+#define FCC_GFMR_SYNL_NONE ((uint)0x00000000)
+#define FCC_GFMR_SYNL_AUTO ((uint)0x00004000)
+#define FCC_GFMR_SYNL_8 ((uint)0x00008000)
+#define FCC_GFMR_SYNL_16 ((uint)0x0000c000)
+#define FCC_GFMR_RTSM ((uint)0x00002000)
+#define FCC_GFMR_RENC_NRZ ((uint)0x00000000)
+#define FCC_GFMR_RENC_NRZI ((uint)0x00000800)
+#define FCC_GFMR_REVD ((uint)0x00000400)
+#define FCC_GFMR_TENC_NRZ ((uint)0x00000000)
+#define FCC_GFMR_TENC_NRZI ((uint)0x00000100)
+#define FCC_GFMR_TCRC_16 ((uint)0x00000000)
+#define FCC_GFMR_TCRC_32 ((uint)0x00000080)
+#define FCC_GFMR_ENR ((uint)0x00000020)
+#define FCC_GFMR_ENT ((uint)0x00000010)
+#define FCC_GFMR_MODE_ENET ((uint)0x0000000c)
+#define FCC_GFMR_MODE_ATM ((uint)0x0000000a)
+#define FCC_GFMR_MODE_HDLC ((uint)0x00000000)
+
+/* Generic FCC parameter ram.
+*/
+typedef struct fcc_param {
+ ushort fcc_riptr; /* Rx Internal temp pointer */
+ ushort fcc_tiptr; /* Tx Internal temp pointer */
+ ushort fcc_res1;
+ ushort fcc_mrblr; /* Max receive buffer length, mod 32 bytes */
+ uint fcc_rstate; /* Upper byte is Func code, must be set */
+ uint fcc_rbase; /* Receive BD base */
+ ushort fcc_rbdstat; /* RxBD status */
+ ushort fcc_rbdlen; /* RxBD down counter */
+ uint fcc_rdptr; /* RxBD internal data pointer */
+ uint fcc_tstate; /* Upper byte is Func code, must be set */
+ uint fcc_tbase; /* Transmit BD base */
+ ushort fcc_tbdstat; /* TxBD status */
+ ushort fcc_tbdlen; /* TxBD down counter */
+ uint fcc_tdptr; /* TxBD internal data pointer */
+ uint fcc_rbptr; /* Rx BD Internal buf pointer */
+ uint fcc_tbptr; /* Tx BD Internal buf pointer */
+ uint fcc_rcrc; /* Rx temp CRC */
+ uint fcc_res2;
+ uint fcc_tcrc; /* Tx temp CRC */
+} fccp_t;
+
+
+/* Ethernet controller through FCC.
+*/
+typedef struct fcc_enet {
+ fccp_t fen_genfcc;
+ uint fen_statbuf; /* Internal status buffer */
+ uint fen_camptr; /* CAM address */
+ uint fen_cmask; /* Constant mask for CRC */
+ uint fen_cpres; /* Preset CRC */
+ uint fen_crcec; /* CRC Error counter */
+ uint fen_alec; /* alignment error counter */
+ uint fen_disfc; /* discard frame counter */
+ ushort fen_retlim; /* Retry limit */
+ ushort fen_retcnt; /* Retry counter */
+ ushort fen_pper; /* Persistence */
+ ushort fen_boffcnt; /* backoff counter */
+ uint fen_gaddrh; /* Group address filter, high 32-bits */
+ uint fen_gaddrl; /* Group address filter, low 32-bits */
+ ushort fen_tfcstat; /* out of sequence TxBD */
+ ushort fen_tfclen;
+ uint fen_tfcptr;
+ ushort fen_mflr; /* Maximum frame length (1518) */
+ ushort fen_paddrh; /* MAC address */
+ ushort fen_paddrm;
+ ushort fen_paddrl;
+ ushort fen_ibdcount; /* Internal BD counter */
+ ushort fen_ibdstart; /* Internal BD start pointer */
+ ushort fen_ibdend; /* Internal BD end pointer */
+ ushort fen_txlen; /* Internal Tx frame length counter */
+ uint fen_ibdbase[8]; /* Internal use */
+ uint fen_iaddrh; /* Individual address filter */
+ uint fen_iaddrl;
+ ushort fen_minflr; /* Minimum frame length (64) */
+ ushort fen_taddrh; /* Filter transfer MAC address */
+ ushort fen_taddrm;
+ ushort fen_taddrl;
+ ushort fen_padptr; /* Pointer to pad byte buffer */
+ ushort fen_cftype; /* control frame type */
+ ushort fen_cfrange; /* control frame range */
+ ushort fen_maxb; /* maximum BD count */
+ ushort fen_maxd1; /* Max DMA1 length (1520) */
+ ushort fen_maxd2; /* Max DMA2 length (1520) */
+ ushort fen_maxd; /* internal max DMA count */
+ ushort fen_dmacnt; /* internal DMA counter */
+ uint fen_octc; /* Total octet counter */
+ uint fen_colc; /* Total collision counter */
+ uint fen_broc; /* Total broadcast packet counter */
+ uint fen_mulc; /* Total multicast packet count */
+ uint fen_uspc; /* Total packets < 64 bytes */
+ uint fen_frgc; /* Total packets < 64 bytes with errors */
+ uint fen_ospc; /* Total packets > 1518 */
+ uint fen_jbrc; /* Total packets > 1518 with errors */
+ uint fen_p64c; /* Total packets == 64 bytes */
+ uint fen_p65c; /* Total packets 64 < bytes <= 127 */
+ uint fen_p128c; /* Total packets 127 < bytes <= 255 */
+ uint fen_p256c; /* Total packets 255 < bytes <= 511 */
+ uint fen_p512c; /* Total packets 511 < bytes <= 1023 */
+ uint fen_p1024c; /* Total packets 1023 < bytes <= 1518 */
+ uint fen_cambuf; /* Internal CAM buffer pointer */
+ ushort fen_rfthr; /* Received frames threshold */
+ ushort fen_rfcnt; /* Received frames count */
+} fcc_enet_t;
+
+/* FCC Event/Mask register as used by Ethernet.
+*/
+#define FCC_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define FCC_ENET_RXC ((ushort)0x0040) /* Control Frame Received */
+#define FCC_ENET_TXC ((ushort)0x0020) /* Out of seq. Tx sent */
+#define FCC_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define FCC_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define FCC_ENET_BSY ((ushort)0x0004) /* Busy. Rx Frame dropped */
+#define FCC_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define FCC_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* FCC Mode Register (FPSMR) as used by Ethernet.
+*/
+#define FCC_PSMR_HBC ((uint)0x80000000) /* Enable heartbeat */
+#define FCC_PSMR_FC ((uint)0x40000000) /* Force Collision */
+#define FCC_PSMR_SBT ((uint)0x20000000) /* Stop backoff timer */
+#define FCC_PSMR_LPB ((uint)0x10000000) /* Local protect. 1 = FDX */
+#define FCC_PSMR_LCW ((uint)0x08000000) /* Late collision select */
+#define FCC_PSMR_FDE ((uint)0x04000000) /* Full Duplex Enable */
+#define FCC_PSMR_MON ((uint)0x02000000) /* RMON Enable */
+#define FCC_PSMR_PRO ((uint)0x00400000) /* Promiscuous Enable */
+#define FCC_PSMR_FCE ((uint)0x00200000) /* Flow Control Enable */
+#define FCC_PSMR_RSH ((uint)0x00100000) /* Receive Short Frames */
+#define FCC_PSMR_CAM ((uint)0x00000400) /* CAM enable */
+#define FCC_PSMR_BRO ((uint)0x00000200) /* Broadcast pkt discard */
+#define FCC_PSMR_ENCRC ((uint)0x00000080) /* Use 32-bit CRC */
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+ ushort iic_rbase; /* Rx Buffer descriptor base address */
+ ushort iic_tbase; /* Tx Buffer descriptor base address */
+ u_char iic_rfcr; /* Rx function code */
+ u_char iic_tfcr; /* Tx function code */
+ ushort iic_mrblr; /* Max receive buffer length */
+ uint iic_rstate; /* Internal */
+ uint iic_rdp; /* Internal */
+ ushort iic_rbptr; /* Internal */
+ ushort iic_rbc; /* Internal */
+ uint iic_rxtmp; /* Internal */
+ uint iic_tstate; /* Internal */
+ uint iic_tdp; /* Internal */
+ ushort iic_tbptr; /* Internal */
+ ushort iic_tbc; /* Internal */
+ uint iic_txtmp; /* Internal */
+} iic_t;
+
+/* IDMA parameter RAM
+*/
+typedef struct idma {
+ ushort ibase; /* IDMA buffer descriptor table base address */
+ ushort dcm; /* DMA channel mode */
+ ushort ibdptr; /* IDMA current buffer descriptor pointer */
+ ushort dpr_buf; /* IDMA transfer buffer base address */
+ ushort buf_inv; /* internal buffer inventory */
+ ushort ss_max; /* steady-state maximum transfer size */
+ ushort dpr_in_ptr; /* write pointer inside the internal buffer */
+ ushort sts; /* source transfer size */
+ ushort dpr_out_ptr; /* read pointer inside the internal buffer */
+ ushort seob; /* source end of burst */
+ ushort deob; /* destination end of burst */
+ ushort dts; /* destination transfer size */
+ ushort ret_add; /* return address when working in ERM=1 mode */
+ ushort res0; /* reserved */
+ uint bd_cnt; /* internal byte count */
+ uint s_ptr; /* source internal data pointer */
+ uint d_ptr; /* destination internal data pointer */
+ uint istate; /* internal state */
+ u_char res1[20]; /* pad to 64-byte length */
+} idma_t;
+
+/* DMA channel mode bit fields
+*/
+#define IDMA_DCM_FB ((ushort)0x8000) /* fly-by mode */
+#define IDMA_DCM_LP ((ushort)0x4000) /* low priority */
+#define IDMA_DCM_TC2 ((ushort)0x0400) /* value driven on TC[2] */
+#define IDMA_DCM_DMA_WRAP_MASK ((ushort)0x01c0) /* mask for DMA wrap */
+#define IDMA_DCM_DMA_WRAP_64 ((ushort)0x0000) /* 64-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_128 ((ushort)0x0040) /* 128-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_256 ((ushort)0x0080) /* 256-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_512 ((ushort)0x00c0) /* 512-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_1024 ((ushort)0x0100) /* 1024-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_2048 ((ushort)0x0140) /* 2048-byte DMA xfer buffer */
+#define IDMA_DCM_SINC ((ushort)0x0020) /* source inc addr */
+#define IDMA_DCM_DINC ((ushort)0x0010) /* destination inc addr */
+#define IDMA_DCM_ERM ((ushort)0x0008) /* external request mode */
+#define IDMA_DCM_DT ((ushort)0x0004) /* DONE treatment */
+#define IDMA_DCM_SD_MASK ((ushort)0x0003) /* mask for SD bit field */
+#define IDMA_DCM_SD_MEM2MEM ((ushort)0x0000) /* memory-to-memory xfer */
+#define IDMA_DCM_SD_PER2MEM ((ushort)0x0002) /* peripheral-to-memory xfer */
+#define IDMA_DCM_SD_MEM2PER ((ushort)0x0001) /* memory-to-peripheral xfer */
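+
+/* Usage sketch (illustrative): a plain memory-to-memory channel mode word
+ * with incrementing source/destination addresses and a 64-byte internal
+ * transfer buffer:
+ *
+ *	ushort dcm = IDMA_DCM_SINC | IDMA_DCM_DINC |
+ *		     IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SD_MEM2MEM;
+ */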
+
+/* IDMA Buffer Descriptors
+*/
+typedef struct idma_bd {
+ uint flags;
+ uint len; /* data length */
+ uint src; /* source data buffer pointer */
+ uint dst; /* destination data buffer pointer */
+} idma_bd_t;
+
+/* IDMA buffer descriptor flag bit fields
+*/
+#define IDMA_BD_V ((uint)0x80000000) /* valid */
+#define IDMA_BD_W ((uint)0x20000000) /* wrap */
+#define IDMA_BD_I ((uint)0x10000000) /* interrupt */
+#define IDMA_BD_L ((uint)0x08000000) /* last */
+#define IDMA_BD_CM ((uint)0x02000000) /* continuous mode */
+#define IDMA_BD_SDN ((uint)0x00400000) /* source done */
+#define IDMA_BD_DDN ((uint)0x00200000) /* destination done */
+#define IDMA_BD_DGBL ((uint)0x00100000) /* destination global */
+#define IDMA_BD_DBO_LE ((uint)0x00040000) /* little-end dest byte order */
+#define IDMA_BD_DBO_BE ((uint)0x00080000) /* big-end dest byte order */
+#define IDMA_BD_DDTB ((uint)0x00010000) /* destination data bus */
+#define IDMA_BD_SGBL ((uint)0x00002000) /* source global */
+#define IDMA_BD_SBO_LE ((uint)0x00000800) /* little-end src byte order */
+#define IDMA_BD_SBO_BE ((uint)0x00001000) /* big-end src byte order */
+#define IDMA_BD_SDTB ((uint)0x00000200) /* source data bus */
+
+/* per-channel IDMA registers
+*/
+typedef struct im_idma {
+ u_char idsr; /* IDMAn event status register */
+ u_char res0[3];
+ u_char idmr; /* IDMAn event mask register */
+ u_char res1[3];
+} im_idma_t;
+
+/* IDMA event register bit fields
+*/
+#define IDMA_EVENT_SC ((unsigned char)0x08) /* stop completed */
+#define IDMA_EVENT_OB ((unsigned char)0x04) /* out of buffers */
+#define IDMA_EVENT_EDN ((unsigned char)0x02) /* external DONE asserted */
+#define IDMA_EVENT_BC ((unsigned char)0x01) /* buffer descriptor complete */
+
+/* RISC Controller Configuration Register (RCCR) bit fields
+*/
+#define RCCR_TIME ((uint)0x80000000) /* timer enable */
+#define RCCR_TIMEP_MASK ((uint)0x3f000000) /* mask for timer period bit field */
+#define RCCR_DR0M ((uint)0x00800000) /* IDMA0 request mode */
+#define RCCR_DR1M ((uint)0x00400000) /* IDMA1 request mode */
+#define RCCR_DR2M ((uint)0x00000080) /* IDMA2 request mode */
+#define RCCR_DR3M ((uint)0x00000040) /* IDMA3 request mode */
+#define RCCR_DR0QP_MASK ((uint)0x00300000) /* mask for IDMA0 req priority */
+#define RCCR_DR0QP_HIGH ((uint)0x00000000) /* IDMA0 has high req priority */
+#define RCCR_DR0QP_MED ((uint)0x00100000) /* IDMA0 has medium req priority */
+#define RCCR_DR0QP_LOW ((uint)0x00200000) /* IDMA0 has low req priority */
+#define RCCR_DR1QP_MASK ((uint)0x00030000) /* mask for IDMA1 req priority */
+#define RCCR_DR1QP_HIGH ((uint)0x00000000) /* IDMA1 has high req priority */
+#define RCCR_DR1QP_MED ((uint)0x00010000) /* IDMA1 has medium req priority */
+#define RCCR_DR1QP_LOW ((uint)0x00020000) /* IDMA1 has low req priority */
+#define RCCR_DR2QP_MASK ((uint)0x00000030) /* mask for IDMA2 req priority */
+#define RCCR_DR2QP_HIGH ((uint)0x00000000) /* IDMA2 has high req priority */
+#define RCCR_DR2QP_MED ((uint)0x00000010) /* IDMA2 has medium req priority */
+#define RCCR_DR2QP_LOW ((uint)0x00000020) /* IDMA2 has low req priority */
+#define RCCR_DR3QP_MASK ((uint)0x00000003) /* mask for IDMA3 req priority */
+#define RCCR_DR3QP_HIGH ((uint)0x00000000) /* IDMA3 has high req priority */
+#define RCCR_DR3QP_MED ((uint)0x00000001) /* IDMA3 has medium req priority */
+#define RCCR_DR3QP_LOW ((uint)0x00000002) /* IDMA3 has low req priority */
+#define RCCR_EIE ((uint)0x00080000) /* external interrupt enable */
+#define RCCR_SCD ((uint)0x00040000) /* scheduler configuration */
+#define RCCR_ERAM_MASK ((uint)0x0000e000) /* mask for enable RAM microcode */
+#define RCCR_ERAM_0KB ((uint)0x00000000) /* use 0KB of dpram for microcode */
+#define RCCR_ERAM_2KB ((uint)0x00002000) /* use 2KB of dpram for microcode */
+#define RCCR_ERAM_4KB ((uint)0x00004000) /* use 4KB of dpram for microcode */
+#define RCCR_ERAM_6KB ((uint)0x00006000) /* use 6KB of dpram for microcode */
+#define RCCR_ERAM_8KB ((uint)0x00008000) /* use 8KB of dpram for microcode */
+#define RCCR_ERAM_10KB ((uint)0x0000a000) /* use 10KB of dpram for microcode */
+#define RCCR_ERAM_12KB ((uint)0x0000c000) /* use 12KB of dpram for microcode */
+#define RCCR_EDM0 ((uint)0x00000800) /* DREQ0 edge detect mode */
+#define RCCR_EDM1 ((uint)0x00000400) /* DREQ1 edge detect mode */
+#define RCCR_EDM2 ((uint)0x00000200) /* DREQ2 edge detect mode */
+#define RCCR_EDM3 ((uint)0x00000100) /* DREQ3 edge detect mode */
+#define RCCR_DEM01 ((uint)0x00000008) /* DONE0/DONE1 edge detect mode */
+#define RCCR_DEM23 ((uint)0x00000004) /* DONE2/DONE3 edge detect mode */
+
+/*-----------------------------------------------------------------------
+ * CMXFCR - CMX FCC Clock Route Register
+ */
+#define CMXFCR_FC1 0x40000000 /* FCC1 connection */
+#define CMXFCR_RF1CS_MSK 0x38000000 /* Receive FCC1 Clock Source Mask */
+#define CMXFCR_TF1CS_MSK 0x07000000 /* Transmit FCC1 Clock Source Mask */
+#define CMXFCR_FC2 0x00400000 /* FCC2 connection */
+#define CMXFCR_RF2CS_MSK 0x00380000 /* Receive FCC2 Clock Source Mask */
+#define CMXFCR_TF2CS_MSK 0x00070000 /* Transmit FCC2 Clock Source Mask */
+#define CMXFCR_FC3 0x00004000 /* FCC3 connection */
+#define CMXFCR_RF3CS_MSK 0x00003800 /* Receive FCC3 Clock Source Mask */
+#define CMXFCR_TF3CS_MSK 0x00000700 /* Transmit FCC3 Clock Source Mask */
+
+#define CMXFCR_RF1CS_BRG5 0x00000000 /* Receive FCC1 Clock Source is BRG5 */
+#define CMXFCR_RF1CS_BRG6 0x08000000 /* Receive FCC1 Clock Source is BRG6 */
+#define CMXFCR_RF1CS_BRG7 0x10000000 /* Receive FCC1 Clock Source is BRG7 */
+#define CMXFCR_RF1CS_BRG8 0x18000000 /* Receive FCC1 Clock Source is BRG8 */
+#define CMXFCR_RF1CS_CLK9 0x20000000 /* Receive FCC1 Clock Source is CLK9 */
+#define CMXFCR_RF1CS_CLK10 0x28000000 /* Receive FCC1 Clock Source is CLK10 */
+#define CMXFCR_RF1CS_CLK11 0x30000000 /* Receive FCC1 Clock Source is CLK11 */
+#define CMXFCR_RF1CS_CLK12 0x38000000 /* Receive FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_TF1CS_BRG5 0x00000000 /* Transmit FCC1 Clock Source is BRG5 */
+#define CMXFCR_TF1CS_BRG6 0x01000000 /* Transmit FCC1 Clock Source is BRG6 */
+#define CMXFCR_TF1CS_BRG7 0x02000000 /* Transmit FCC1 Clock Source is BRG7 */
+#define CMXFCR_TF1CS_BRG8 0x03000000 /* Transmit FCC1 Clock Source is BRG8 */
+#define CMXFCR_TF1CS_CLK9 0x04000000 /* Transmit FCC1 Clock Source is CLK9 */
+#define CMXFCR_TF1CS_CLK10 0x05000000 /* Transmit FCC1 Clock Source is CLK10 */
+#define CMXFCR_TF1CS_CLK11 0x06000000 /* Transmit FCC1 Clock Source is CLK11 */
+#define CMXFCR_TF1CS_CLK12 0x07000000 /* Transmit FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_RF2CS_BRG5 0x00000000 /* Receive FCC2 Clock Source is BRG5 */
+#define CMXFCR_RF2CS_BRG6 0x00080000 /* Receive FCC2 Clock Source is BRG6 */
+#define CMXFCR_RF2CS_BRG7 0x00100000 /* Receive FCC2 Clock Source is BRG7 */
+#define CMXFCR_RF2CS_BRG8 0x00180000 /* Receive FCC2 Clock Source is BRG8 */
+#define CMXFCR_RF2CS_CLK13 0x00200000 /* Receive FCC2 Clock Source is CLK13 */
+#define CMXFCR_RF2CS_CLK14 0x00280000 /* Receive FCC2 Clock Source is CLK14 */
+#define CMXFCR_RF2CS_CLK15 0x00300000 /* Receive FCC2 Clock Source is CLK15 */
+#define CMXFCR_RF2CS_CLK16 0x00380000 /* Receive FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_TF2CS_BRG5 0x00000000 /* Transmit FCC2 Clock Source is BRG5 */
+#define CMXFCR_TF2CS_BRG6 0x00010000 /* Transmit FCC2 Clock Source is BRG6 */
+#define CMXFCR_TF2CS_BRG7 0x00020000 /* Transmit FCC2 Clock Source is BRG7 */
+#define CMXFCR_TF2CS_BRG8 0x00030000 /* Transmit FCC2 Clock Source is BRG8 */
+#define CMXFCR_TF2CS_CLK13 0x00040000 /* Transmit FCC2 Clock Source is CLK13 */
+#define CMXFCR_TF2CS_CLK14 0x00050000 /* Transmit FCC2 Clock Source is CLK14 */
+#define CMXFCR_TF2CS_CLK15 0x00060000 /* Transmit FCC2 Clock Source is CLK15 */
+#define CMXFCR_TF2CS_CLK16 0x00070000 /* Transmit FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_RF3CS_BRG5 0x00000000 /* Receive FCC3 Clock Source is BRG5 */
+#define CMXFCR_RF3CS_BRG6 0x00000800 /* Receive FCC3 Clock Source is BRG6 */
+#define CMXFCR_RF3CS_BRG7 0x00001000 /* Receive FCC3 Clock Source is BRG7 */
+#define CMXFCR_RF3CS_BRG8 0x00001800 /* Receive FCC3 Clock Source is BRG8 */
+#define CMXFCR_RF3CS_CLK13 0x00002000 /* Receive FCC3 Clock Source is CLK13 */
+#define CMXFCR_RF3CS_CLK14 0x00002800 /* Receive FCC3 Clock Source is CLK14 */
+#define CMXFCR_RF3CS_CLK15 0x00003000 /* Receive FCC3 Clock Source is CLK15 */
+#define CMXFCR_RF3CS_CLK16 0x00003800 /* Receive FCC3 Clock Source is CLK16 */
+
+#define CMXFCR_TF3CS_BRG5 0x00000000 /* Transmit FCC3 Clock Source is BRG5 */
+#define CMXFCR_TF3CS_BRG6 0x00000100 /* Transmit FCC3 Clock Source is BRG6 */
+#define CMXFCR_TF3CS_BRG7 0x00000200 /* Transmit FCC3 Clock Source is BRG7 */
+#define CMXFCR_TF3CS_BRG8 0x00000300 /* Transmit FCC3 Clock Source is BRG8 */
+#define CMXFCR_TF3CS_CLK13 0x00000400 /* Transmit FCC3 Clock Source is CLK13 */
+#define CMXFCR_TF3CS_CLK14 0x00000500 /* Transmit FCC3 Clock Source is CLK14 */
+#define CMXFCR_TF3CS_CLK15 0x00000600 /* Transmit FCC3 Clock Source is CLK15 */
+#define CMXFCR_TF3CS_CLK16 0x00000700 /* Transmit FCC3 Clock Source is CLK16 */
+
+/*-----------------------------------------------------------------------
+ * CMXSCR - CMX SCC Clock Route Register
+ */
+#define CMXSCR_GR1 0x80000000 /* Grant Support of SCC1 */
+#define CMXSCR_SC1 0x40000000 /* SCC1 connection */
+#define CMXSCR_RS1CS_MSK 0x38000000 /* Receive SCC1 Clock Source Mask */
+#define CMXSCR_TS1CS_MSK 0x07000000 /* Transmit SCC1 Clock Source Mask */
+#define CMXSCR_GR2 0x00800000 /* Grant Support of SCC2 */
+#define CMXSCR_SC2 0x00400000 /* SCC2 connection */
+#define CMXSCR_RS2CS_MSK 0x00380000 /* Receive SCC2 Clock Source Mask */
+#define CMXSCR_TS2CS_MSK 0x00070000 /* Transmit SCC2 Clock Source Mask */
+#define CMXSCR_GR3 0x00008000 /* Grant Support of SCC3 */
+#define CMXSCR_SC3 0x00004000 /* SCC3 connection */
+#define CMXSCR_RS3CS_MSK 0x00003800 /* Receive SCC3 Clock Source Mask */
+#define CMXSCR_TS3CS_MSK 0x00000700 /* Transmit SCC3 Clock Source Mask */
+#define CMXSCR_GR4 0x00000080 /* Grant Support of SCC4 */
+#define CMXSCR_SC4 0x00000040 /* SCC4 connection */
+#define CMXSCR_RS4CS_MSK 0x00000038 /* Receive SCC4 Clock Source Mask */
+#define CMXSCR_TS4CS_MSK 0x00000007 /* Transmit SCC4 Clock Source Mask */
+
+#define CMXSCR_RS1CS_BRG1 0x00000000 /* SCC1 Rx Clock Source is BRG1 */
+#define CMXSCR_RS1CS_BRG2 0x08000000 /* SCC1 Rx Clock Source is BRG2 */
+#define CMXSCR_RS1CS_BRG3 0x10000000 /* SCC1 Rx Clock Source is BRG3 */
+#define CMXSCR_RS1CS_BRG4 0x18000000 /* SCC1 Rx Clock Source is BRG4 */
+#define CMXSCR_RS1CS_CLK11 0x20000000 /* SCC1 Rx Clock Source is CLK11 */
+#define CMXSCR_RS1CS_CLK12 0x28000000 /* SCC1 Rx Clock Source is CLK12 */
+#define CMXSCR_RS1CS_CLK3 0x30000000 /* SCC1 Rx Clock Source is CLK3 */
+#define CMXSCR_RS1CS_CLK4 0x38000000 /* SCC1 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS1CS_BRG1 0x00000000 /* SCC1 Tx Clock Source is BRG1 */
+#define CMXSCR_TS1CS_BRG2 0x01000000 /* SCC1 Tx Clock Source is BRG2 */
+#define CMXSCR_TS1CS_BRG3 0x02000000 /* SCC1 Tx Clock Source is BRG3 */
+#define CMXSCR_TS1CS_BRG4 0x03000000 /* SCC1 Tx Clock Source is BRG4 */
+#define CMXSCR_TS1CS_CLK11 0x04000000 /* SCC1 Tx Clock Source is CLK11 */
+#define CMXSCR_TS1CS_CLK12 0x05000000 /* SCC1 Tx Clock Source is CLK12 */
+#define CMXSCR_TS1CS_CLK3 0x06000000 /* SCC1 Tx Clock Source is CLK3 */
+#define CMXSCR_TS1CS_CLK4 0x07000000 /* SCC1 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS2CS_BRG1 0x00000000 /* SCC2 Rx Clock Source is BRG1 */
+#define CMXSCR_RS2CS_BRG2 0x00080000 /* SCC2 Rx Clock Source is BRG2 */
+#define CMXSCR_RS2CS_BRG3 0x00100000 /* SCC2 Rx Clock Source is BRG3 */
+#define CMXSCR_RS2CS_BRG4 0x00180000 /* SCC2 Rx Clock Source is BRG4 */
+#define CMXSCR_RS2CS_CLK11 0x00200000 /* SCC2 Rx Clock Source is CLK11 */
+#define CMXSCR_RS2CS_CLK12 0x00280000 /* SCC2 Rx Clock Source is CLK12 */
+#define CMXSCR_RS2CS_CLK3 0x00300000 /* SCC2 Rx Clock Source is CLK3 */
+#define CMXSCR_RS2CS_CLK4 0x00380000 /* SCC2 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS2CS_BRG1 0x00000000 /* SCC2 Tx Clock Source is BRG1 */
+#define CMXSCR_TS2CS_BRG2 0x00010000 /* SCC2 Tx Clock Source is BRG2 */
+#define CMXSCR_TS2CS_BRG3 0x00020000 /* SCC2 Tx Clock Source is BRG3 */
+#define CMXSCR_TS2CS_BRG4 0x00030000 /* SCC2 Tx Clock Source is BRG4 */
+#define CMXSCR_TS2CS_CLK11 0x00040000 /* SCC2 Tx Clock Source is CLK11 */
+#define CMXSCR_TS2CS_CLK12 0x00050000 /* SCC2 Tx Clock Source is CLK12 */
+#define CMXSCR_TS2CS_CLK3 0x00060000 /* SCC2 Tx Clock Source is CLK3 */
+#define CMXSCR_TS2CS_CLK4 0x00070000 /* SCC2 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS3CS_BRG1 0x00000000 /* SCC3 Rx Clock Source is BRG1 */
+#define CMXSCR_RS3CS_BRG2 0x00000800 /* SCC3 Rx Clock Source is BRG2 */
+#define CMXSCR_RS3CS_BRG3 0x00001000 /* SCC3 Rx Clock Source is BRG3 */
+#define CMXSCR_RS3CS_BRG4 0x00001800 /* SCC3 Rx Clock Source is BRG4 */
+#define CMXSCR_RS3CS_CLK5 0x00002000 /* SCC3 Rx Clock Source is CLK5 */
+#define CMXSCR_RS3CS_CLK6 0x00002800 /* SCC3 Rx Clock Source is CLK6 */
+#define CMXSCR_RS3CS_CLK7 0x00003000 /* SCC3 Rx Clock Source is CLK7 */
+#define CMXSCR_RS3CS_CLK8 0x00003800 /* SCC3 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS3CS_BRG1 0x00000000 /* SCC3 Tx Clock Source is BRG1 */
+#define CMXSCR_TS3CS_BRG2 0x00000100 /* SCC3 Tx Clock Source is BRG2 */
+#define CMXSCR_TS3CS_BRG3 0x00000200 /* SCC3 Tx Clock Source is BRG3 */
+#define CMXSCR_TS3CS_BRG4 0x00000300 /* SCC3 Tx Clock Source is BRG4 */
+#define CMXSCR_TS3CS_CLK5 0x00000400 /* SCC3 Tx Clock Source is CLK5 */
+#define CMXSCR_TS3CS_CLK6 0x00000500 /* SCC3 Tx Clock Source is CLK6 */
+#define CMXSCR_TS3CS_CLK7 0x00000600 /* SCC3 Tx Clock Source is CLK7 */
+#define CMXSCR_TS3CS_CLK8 0x00000700 /* SCC3 Tx Clock Source is CLK8 */
+
+#define CMXSCR_RS4CS_BRG1 0x00000000 /* SCC4 Rx Clock Source is BRG1 */
+#define CMXSCR_RS4CS_BRG2 0x00000008 /* SCC4 Rx Clock Source is BRG2 */
+#define CMXSCR_RS4CS_BRG3 0x00000010 /* SCC4 Rx Clock Source is BRG3 */
+#define CMXSCR_RS4CS_BRG4 0x00000018 /* SCC4 Rx Clock Source is BRG4 */
+#define CMXSCR_RS4CS_CLK5 0x00000020 /* SCC4 Rx Clock Source is CLK5 */
+#define CMXSCR_RS4CS_CLK6 0x00000028 /* SCC4 Rx Clock Source is CLK6 */
+#define CMXSCR_RS4CS_CLK7 0x00000030 /* SCC4 Rx Clock Source is CLK7 */
+#define CMXSCR_RS4CS_CLK8 0x00000038 /* SCC4 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS4CS_BRG1 0x00000000 /* SCC4 Tx Clock Source is BRG1 */
+#define CMXSCR_TS4CS_BRG2 0x00000001 /* SCC4 Tx Clock Source is BRG2 */
+#define CMXSCR_TS4CS_BRG3 0x00000002 /* SCC4 Tx Clock Source is BRG3 */
+#define CMXSCR_TS4CS_BRG4 0x00000003 /* SCC4 Tx Clock Source is BRG4 */
+#define CMXSCR_TS4CS_CLK5 0x00000004 /* SCC4 Tx Clock Source is CLK5 */
+#define CMXSCR_TS4CS_CLK6 0x00000005 /* SCC4 Tx Clock Source is CLK6 */
+#define CMXSCR_TS4CS_CLK7 0x00000006 /* SCC4 Tx Clock Source is CLK7 */
+#define CMXSCR_TS4CS_CLK8 0x00000007 /* SCC4 Tx Clock Source is CLK8 */
+
+/*-----------------------------------------------------------------------
+ * SIUMCR - SIU Module Configuration Register 4-31
+ */
+#define SIUMCR_BBD 0x80000000 /* Bus Busy Disable */
+#define SIUMCR_ESE 0x40000000 /* External Snoop Enable */
+#define SIUMCR_PBSE 0x20000000 /* Parity Byte Select Enable */
+#define SIUMCR_CDIS 0x10000000 /* Core Disable */
+#define SIUMCR_DPPC00 0x00000000 /* Data Parity Pins Configuration*/
+#define SIUMCR_DPPC01 0x04000000 /* - " - */
+#define SIUMCR_DPPC10 0x08000000 /* - " - */
+#define SIUMCR_DPPC11 0x0c000000 /* - " - */
+#define SIUMCR_L2CPC00 0x00000000 /* L2 Cache Pins Configuration */
+#define SIUMCR_L2CPC01 0x01000000 /* - " - */
+#define SIUMCR_L2CPC10 0x02000000 /* - " - */
+#define SIUMCR_L2CPC11 0x03000000 /* - " - */
+#define SIUMCR_LBPC00 0x00000000 /* Local Bus Pins Configuration */
+#define SIUMCR_LBPC01 0x00400000 /* - " - */
+#define SIUMCR_LBPC10 0x00800000 /* - " - */
+#define SIUMCR_LBPC11 0x00c00000 /* - " - */
+#define SIUMCR_APPC00 0x00000000 /* Address Parity Pins Configuration*/
+#define SIUMCR_APPC01 0x00100000 /* - " - */
+#define SIUMCR_APPC10 0x00200000 /* - " - */
+#define SIUMCR_APPC11 0x00300000 /* - " - */
+#define SIUMCR_CS10PC00 0x00000000 /* CS10 Pin Configuration */
+#define SIUMCR_CS10PC01 0x00040000 /* - " - */
+#define SIUMCR_CS10PC10 0x00080000 /* - " - */
+#define SIUMCR_CS10PC11 0x000c0000 /* - " - */
+#define SIUMCR_BCTLC00 0x00000000 /* Buffer Control Configuration */
+#define SIUMCR_BCTLC01 0x00010000 /* - " - */
+#define SIUMCR_BCTLC10 0x00020000 /* - " - */
+#define SIUMCR_BCTLC11 0x00030000 /* - " - */
+#define SIUMCR_MMR00 0x00000000 /* Mask Masters Requests */
+#define SIUMCR_MMR01 0x00004000 /* - " - */
+#define SIUMCR_MMR10 0x00008000 /* - " - */
+#define SIUMCR_MMR11 0x0000c000 /* - " - */
+#define SIUMCR_LPBSE 0x00002000 /* LocalBus Parity Byte Select Enable*/
+
+/*-----------------------------------------------------------------------
+ * SCCR - System Clock Control Register 9-8
+ */
+#define SCCR_PCI_MODE 0x00000100 /* PCI Mode */
+#define SCCR_PCI_MODCK 0x00000080 /* Value of PCI_MODCK pin */
+#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */
+#define SCCR_PCIDF_SHIFT 3
+
+#ifndef CPM_IMMR_OFFSET
+#define CPM_IMMR_OFFSET 0x101a8
+#endif
+
+#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */
+
+/* FCC I/O port & clock configuration. BSP code is responsible for defining
+ * Fx_RXCLK & Fx_TXCLK in order to use the clock-computing macros below for
+ * FCC x; a usage sketch follows the CLK_TRX definition below.
+ */
+
+/* Automatically generates register configurations */
+#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */
+
+#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */
+#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */
+#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */
+#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */
+#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */
+#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */
+
+#define PC_F1RXCLK PC_CLK(F1_RXCLK)
+#define PC_F1TXCLK PC_CLK(F1_TXCLK)
+#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
+#define CMX1_CLK_MASK ((uint)0xff000000)
+
+#define PC_F2RXCLK PC_CLK(F2_RXCLK)
+#define PC_F2TXCLK PC_CLK(F2_TXCLK)
+#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
+#define CMX2_CLK_MASK ((uint)0x00ff0000)
+
+#define PC_F3RXCLK PC_CLK(F3_RXCLK)
+#define PC_F3TXCLK PC_CLK(F3_TXCLK)
+#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
+#define CMX3_CLK_MASK ((uint)0x0000ff00)
+
+#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK)
+#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE)
+
+#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK)
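+
+/* A minimal usage sketch, assuming a hypothetical board that picked CLK11
+ * for FCC1 receive and CLK10 for transmit (these numbers are illustrative,
+ * not mandated; cmxfcr_addr stands for the caller's mapping of CMXFCR):
+ *
+ *	#define F1_RXCLK	11
+ *	#define F1_TXCLK	10
+ *
+ *	out_be32(cmxfcr_addr,
+ *		 (in_be32(cmxfcr_addr) & ~CMX1_CLK_MASK) | CMX1_CLK_ROUTE);
+ */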
+
+/* I/O Pin assignment for FCC1. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PA1_COL 0x00000001U
+#define PA1_CRS 0x00000002U
+#define PA1_TXER 0x00000004U
+#define PA1_TXEN 0x00000008U
+#define PA1_RXDV 0x00000010U
+#define PA1_RXER 0x00000020U
+#define PA1_TXDAT 0x00003c00U
+#define PA1_RXDAT 0x0003c000U
+#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT)
+#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
+ PA1_RXDV | PA1_RXER)
+#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
+#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER)
+
+
+/* I/O Pin assignment for FCC2. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB2_TXER 0x00000001U
+#define PB2_RXDV 0x00000002U
+#define PB2_TXEN 0x00000004U
+#define PB2_RXER 0x00000008U
+#define PB2_COL 0x00000010U
+#define PB2_CRS 0x00000020U
+#define PB2_TXDAT 0x000003c0U
+#define PB2_RXDAT 0x00003c00U
+#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
+ PB2_RXER | PB2_RXDV | PB2_TXER)
+#define PB2_PSORB1 (PB2_TXEN)
+#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
+#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER)
+
+
+/* I/O Pin assignment for FCC3. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB3_RXDV 0x00004000U
+#define PB3_RXER 0x00008000U
+#define PB3_TXER 0x00010000U
+#define PB3_TXEN 0x00020000U
+#define PB3_COL 0x00040000U
+#define PB3_CRS 0x00080000U
+#define PB3_TXDAT 0x0f000000U
+#define PC3_TXDAT 0x00000010U
+#define PB3_RXDAT 0x00f00000U
+#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
+ PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
+#define PB3_PSORB1 0
+#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
+#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER)
+#define PC3_DIRC1 (PC3_TXDAT)
+
+/* Handy macros to specify memory offsets for the FCCs */
+#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128))
+#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0)
+#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
+#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2)
+
+/* Clocks and BRGs */
+
+enum cpm_clk_dir {
+ CPM_CLK_RX,
+ CPM_CLK_TX,
+ CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+ CPM_CLK_SCC1,
+ CPM_CLK_SCC2,
+ CPM_CLK_SCC3,
+ CPM_CLK_SCC4,
+ CPM_CLK_FCC1,
+ CPM_CLK_FCC2,
+ CPM_CLK_FCC3,
+ CPM_CLK_SMC1,
+ CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+ CPM_CLK_NONE = 0,
+ CPM_BRG1, /* Baud Rate Generator 1 */
+ CPM_BRG2, /* Baud Rate Generator 2 */
+ CPM_BRG3, /* Baud Rate Generator 3 */
+ CPM_BRG4, /* Baud Rate Generator 4 */
+ CPM_BRG5, /* Baud Rate Generator 5 */
+ CPM_BRG6, /* Baud Rate Generator 6 */
+ CPM_BRG7, /* Baud Rate Generator 7 */
+ CPM_BRG8, /* Baud Rate Generator 8 */
+ CPM_CLK1, /* Clock 1 */
+ CPM_CLK2, /* Clock 2 */
+ CPM_CLK3, /* Clock 3 */
+ CPM_CLK4, /* Clock 4 */
+ CPM_CLK5, /* Clock 5 */
+ CPM_CLK6, /* Clock 6 */
+ CPM_CLK7, /* Clock 7 */
+ CPM_CLK8, /* Clock 8 */
+ CPM_CLK9, /* Clock 9 */
+ CPM_CLK10, /* Clock 10 */
+ CPM_CLK11, /* Clock 11 */
+ CPM_CLK12, /* Clock 12 */
+ CPM_CLK13, /* Clock 13 */
+ CPM_CLK14, /* Clock 14 */
+ CPM_CLK15, /* Clock 15 */
+ CPM_CLK16, /* Clock 16 */
+ CPM_CLK17, /* Clock 17 */
+ CPM_CLK18, /* Clock 18 */
+ CPM_CLK19, /* Clock 19 */
+ CPM_CLK20, /* Clock 20 */
+ CPM_CLK_DUMMY
+};
+
+int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
+int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
+
+#define CPM_PIN_INPUT 0
+#define CPM_PIN_OUTPUT 1
+#define CPM_PIN_PRIMARY 0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO 4
+#define CPM_PIN_OPENDRAIN 8
+
+void __init cpm2_set_pin(int port, int pin, int flags);
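+
+/* For instance, board setup code might route CLK11 to the FCC1 receiver
+ * and claim one pin as a secondary-function output; the port/pin numbers
+ * here are illustrative, not taken from any particular board:
+ *
+ *	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX);
+ *	cpm2_set_pin(2, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY);
+ */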
+
+#endif /* __CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index 000000000..727d4b321
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+#include <asm/cputable.h>
+
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
+{
+ return !!((CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_CPU_FTR_KEYS BITS_PER_LONG
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+ int i;
+
+#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+ printk("Warning! cpu_has_feature() used prior to jump label init!\n");
+ dump_stack();
+ return early_cpu_has_feature(feature);
+ }
+#endif
+
+ if (CPU_FTRS_ALWAYS & feature)
+ return true;
+
+ if (!(CPU_FTRS_POSSIBLE & feature))
+ return false;
+
+ i = __builtin_ctzl(feature);
+ return static_branch_likely(&cpu_feature_keys[i]);
+}
+#else
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+ return early_cpu_has_feature(feature);
+}
+#endif
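+
+/* Typical use is a query on a single, compile-time-constant CPU_FTR_* bit,
+ * e.g. (radix_setup() is a placeholder, not a real kernel function):
+ *
+ *	if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *		radix_setup();
+ *
+ * The argument must be one constant feature bit: the jump-label variant
+ * above indexes cpu_feature_keys with __builtin_ctzl(feature).
+ */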
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cpu_setup.h b/arch/powerpc/include/asm/cpu_setup.h
new file mode 100644
index 000000000..30e2fe389
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_setup.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_CPU_SETUP_H
+#define _ASM_POWERPC_CPU_SETUP_H
+void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_power7(void);
+void __restore_cpu_power8(void);
+void __restore_cpu_power9(void);
+void __restore_cpu_power10(void);
+
+void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440ep(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440epx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440gx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440grx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440spe(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440x5(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460ex(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460gt(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_603(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_604(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750cx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750fx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7400(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7410(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_745x(unsigned long offset, struct cpu_spec *spec);
+
+void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_pa6t(void);
+void __restore_cpu_ppc970(void);
+
+void __setup_cpu_e5500(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e6500(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_e5500(void);
+void __restore_cpu_e6500(void);
+#endif /* _ASM_POWERPC_CPU_SETUP_H */
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
new file mode 100644
index 000000000..f6f790a90
--- /dev/null
+++ b/arch/powerpc/include/asm/cpufeature.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see asm/cputable.h for powerpc CPU features.
+ *
+ * Copyright 2016 Alastair D'Silva, IBM Corporation.
+ */
+
+#ifndef __ASM_POWERPC_CPUFEATURE_H
+#define __ASM_POWERPC_CPUFEATURE_H
+
+#include <asm/cputable.h>
+
+/* Keep these in step with powerpc/include/asm/cputable.h */
+#define MAX_CPU_FEATURES (2 * 32)
+
+/*
+ * Currently we don't have a need for any of the feature bits defined in
+ * cpu_user_features. When we do, they should be defined like so:
+ *
+ * #define PPC_MODULE_FEATURE_32 (ilog2(PPC_FEATURE_32))
+ */
+
+#define PPC_MODULE_FEATURE_VEC_CRYPTO (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
+
+#define cpu_feature(x) (x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ if (num < 32)
+ return !!(cur_cpu_spec->cpu_user_features & 1UL << num);
+ else
+ return !!(cur_cpu_spec->cpu_user_features2 & 1UL << (num - 32));
+}
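+
+/* A module can gate its autoloading on one of these bits via the generic
+ * helper from <linux/cpufeature.h>; mod_init below names the module's own
+ * init function:
+ *
+ *	module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, mod_init);
+ */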
+
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
new file mode 100644
index 000000000..0cce5dc7f
--- /dev/null
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CPUIDLE_H
+#define _ASM_POWERPC_CPUIDLE_H
+
+#ifdef CONFIG_PPC_POWERNV
+/* Thread state used in powernv idle state management */
+#define PNV_THREAD_RUNNING 0
+#define PNV_THREAD_NAP 1
+#define PNV_THREAD_SLEEP 2
+#define PNV_THREAD_WINKLE 3
+
+/*
+ * Core state used in powernv idle for POWER8.
+ *
+ * The lock bit synchronizes updates to the state, as well as parts of the
+ * sleep/wake code (see kernel/idle_book3s.S).
+ *
+ * The bottom 8 bits track the idle state of each thread. A thread's bit is
+ * cleared before it executes an idle instruction (nap/sleep/winkle).
+ *
+ * Then there is winkle tracking. A core does not lose complete state
+ * until every thread is in winkle. So the winkle count field counts the
+ * number of threads in winkle (small window of false positives is okay
+ * around the sleep/wake, so long as there are no false negatives).
+ *
+ * When the winkle count reaches 8 (the COUNT_ALL_BIT becomes set), then
+ * the THREAD_WINKLE_BITS are set, which indicate which threads have not
+ * yet woken from the winkle state.
+ */
+#define NR_PNV_CORE_IDLE_LOCK_BIT 28
+#define PNV_CORE_IDLE_LOCK_BIT (1ULL << NR_PNV_CORE_IDLE_LOCK_BIT)
+
+#define PNV_CORE_IDLE_WINKLE_COUNT_SHIFT 16
+#define PNV_CORE_IDLE_WINKLE_COUNT 0x00010000
+#define PNV_CORE_IDLE_WINKLE_COUNT_BITS 0x000F0000
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT 8
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS 0x0000FF00
+
+#define PNV_CORE_IDLE_THREAD_BITS 0x000000FF
+
+/*
+ * ============================ NOTE =================================
+ * Older firmware populates only the RL field in psscr_val and sets
+ * psscr_mask to 0xf. On such firmware, the kernel sets the remaining
+ * PSSCR fields to default values, as follows:
+ *
+ * - The ESL and EC bits are set to 1, so wakeup from any stop state
+ *   will be at vector 0x100.
+ *
+ * - MTL and PSLL are set to the maximum allowed value as per the ISA,
+ *   i.e. 15.
+ *
+ * - The Transition Rate (TR) is set to the maximum value, 3.
+ *
+ * A sketch of this merge follows the GET_PSSCR_* macros below.
+ */
+#define PSSCR_HV_DEFAULT_VAL (PSSCR_ESL | PSSCR_EC | \
+ PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
+ PSSCR_MTL_MASK)
+
+#define PSSCR_HV_DEFAULT_MASK (PSSCR_ESL | PSSCR_EC | \
+ PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
+ PSSCR_MTL_MASK | PSSCR_RL_MASK)
+#define PSSCR_EC_SHIFT 20
+#define PSSCR_ESL_SHIFT 21
+#define GET_PSSCR_EC(x) (((x) & PSSCR_EC) >> PSSCR_EC_SHIFT)
+#define GET_PSSCR_ESL(x) (((x) & PSSCR_ESL) >> PSSCR_ESL_SHIFT)
+#define GET_PSSCR_RL(x) ((x) & PSSCR_RL_MASK)
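+
+/* Sketch of the merge described in the NOTE above: fields the firmware did
+ * not supply (mask bits clear) are filled from the kernel defaults. This
+ * mirrors the platform idle code rather than quoting it:
+ *
+ *	psscr_val = (psscr_val & psscr_mask) |
+ *		    (PSSCR_HV_DEFAULT_VAL & ~psscr_mask);
+ */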
+
+#define ERR_EC_ESL_MISMATCH -1
+#define ERR_DEEP_STATE_ESL_MISMATCH -2
+
+#ifndef __ASSEMBLY__
+
+#define PNV_IDLE_NAME_LEN 16
+struct pnv_idle_states_t {
+ char name[PNV_IDLE_NAME_LEN];
+ u32 latency_ns;
+ u32 residency_ns;
+ u64 psscr_val;
+ u64 psscr_mask;
+ u32 flags;
+ bool valid;
+};
+
+extern struct pnv_idle_states_t *pnv_idle_states;
+extern int nr_pnv_idle_states;
+
+unsigned long pnv_cpu_offline(unsigned int cpu);
+int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
+static inline void report_invalid_psscr_val(u64 psscr_val, int err)
+{
+ switch (err) {
+ case ERR_EC_ESL_MISMATCH:
+ pr_warn("Invalid psscr 0x%016llx : ESL,EC bits unequal",
+ psscr_val);
+ break;
+ case ERR_DEEP_STATE_ESL_MISMATCH:
+ pr_warn("Invalid psscr 0x%016llx : ESL cleared for deep stop-state",
+ psscr_val);
+ }
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_POWERNV */
+
+#endif /* _ASM_POWERPC_CPUIDLE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
new file mode 100644
index 000000000..757dbded1
--- /dev/null
+++ b/arch/powerpc/include/asm/cputable.h
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/cputable.h>
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+
+/* This structure can grow; its real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+
+typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+typedef void (*cpu_restore_t)(void);
+
+enum powerpc_pmc_type {
+ PPC_PMC_DEFAULT = 0,
+ PPC_PMC_IBM = 1,
+ PPC_PMC_PA6T = 2,
+ PPC_PMC_G4 = 3,
+};
+
+struct pt_regs;
+
+extern int machine_check_generic(struct pt_regs *regs);
+extern int machine_check_4xx(struct pt_regs *regs);
+extern int machine_check_440A(struct pt_regs *regs);
+extern int machine_check_e500mc(struct pt_regs *regs);
+extern int machine_check_e500(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
+int machine_check_83xx(struct pt_regs *regs);
+
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+
+/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
+struct cpu_spec {
+ /* CPU is matched via (PVR & pvr_mask) == pvr_value */
+ unsigned int pvr_mask;
+ unsigned int pvr_value;
+
+ char *cpu_name;
+ unsigned long cpu_features; /* Kernel features */
+ unsigned int cpu_user_features; /* Userland features */
+ unsigned int cpu_user_features2; /* Userland features v2 */
+ unsigned int mmu_features; /* MMU features */
+
+ /* cache line sizes */
+ unsigned int icache_bsize;
+ unsigned int dcache_bsize;
+
+ /* flush caches inside the current cpu */
+ void (*cpu_down_flush)(void);
+
+ /* number of performance monitor counters */
+ unsigned int num_pmcs;
+ enum powerpc_pmc_type pmc_type;
+
+ /* this is called to initialize various CPU bits like L1 cache,
+ * BHT, SPD, etc... from head.S before branching to identify_machine
+ */
+ cpu_setup_t cpu_setup;
+ /* Used to restore cpu setup on secondary processors and at resume */
+ cpu_restore_t cpu_restore;
+
+ /* Name of processor class, for the ELF AT_PLATFORM entry */
+ char *platform;
+
+ /* Processor specific machine check handling. Return negative
+ * if the error is fatal, 1 if it was fully recovered and 0 to
+ * pass up (not CPU originated) */
+ int (*machine_check)(struct pt_regs *regs);
+
+ /*
+ * Processor specific early machine check handler which is
+ * called in real mode to handle SLB and TLB errors.
+ */
+ long (*machine_check_early)(struct pt_regs *regs);
+};
+
+extern struct cpu_spec *cur_cpu_spec;
+
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern void set_cur_cpu_spec(struct cpu_spec *s);
+extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+ void *fixup_end);
+
+extern const char *powerpc_base_platform;
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Definitions for features that we have on both 32-bit and 64-bit chips */
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x00000002)
+#define CPU_FTR_DBELL ASM_CONST(0x00000004)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x00000008)
+#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00000010)
+// ASM_CONST(0x00000020) Free
+#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00000040)
+#define CPU_FTR_LWSYNC ASM_CONST(0x00000080)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100)
+#define CPU_FTR_EMB_HV ASM_CONST(0x00000200)
+
+/* Definitions for features that only exist on 32-bit chips */
+#ifdef CONFIG_PPC32
+#define CPU_FTR_L2CR ASM_CONST(0x00002000)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x00004000)
+#define CPU_FTR_TAU ASM_CONST(0x00008000)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x00010000)
+#define CPU_FTR_L3CR ASM_CONST(0x00040000)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00080000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00100000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00200000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x00400000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x00800000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x01000000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x02000000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x04000000)
+#define CPU_FTR_SPE ASM_CONST(0x10000000)
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x20000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x40000000)
+
+#else /* CONFIG_PPC32 */
+/* Define these to 0 for the sake of tests in common code */
+#define CPU_FTR_PPC_LE (0)
+#define CPU_FTR_SPE (0)
+#endif
+
+/*
+ * Definitions for features unique to 64-bit processors;
+ * on 32-bit, the names are made available but defined to be 0.
+ */
+#ifdef __powerpc64__
+#define LONG_ASM_CONST(x) ASM_CONST(x)
+#else
+#define LONG_ASM_CONST(x) 0
+#endif
+
+#define CPU_FTR_REAL_LE LONG_ASM_CONST(0x0000000000001000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000000002000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000000008000)
+#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000000010000)
+#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000000000020000)
+#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000000000040000)
+#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000000000080000)
+#define CPU_FTR_SMT LONG_ASM_CONST(0x0000000000100000)
+#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000000000200000)
+#define CPU_FTR_PURR LONG_ASM_CONST(0x0000000000400000)
+#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000000000800000)
+#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
+#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
+#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
+#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0000000080000000)
+#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0000000200000000)
+/* LONG_ASM_CONST(0x0000000400000000) Free */
+#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_TM LONG_ASM_CONST(0x0000001000000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000002000000000)
+#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000)
+#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
+#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_DAWR1 LONG_ASM_CONST(0x0008000000000000)
+
+#ifndef __ASSEMBLY__
+
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE)
+
+/* We only set the altivec features if the kernel was compiled with altivec
+ * support
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP 0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
+#endif
+
+/* We only set the VSX features if the kernel was compiled with VSX
+ * support
+ */
+#ifdef CONFIG_VSX
+#define CPU_FTR_VSX_COMP CPU_FTR_VSX
+#define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX
+#else
+#define CPU_FTR_VSX_COMP 0
+#define PPC_FEATURE_HAS_VSX_COMP 0
+#endif
+
+/* We only set the spe features if the kernel was compiled with spe
+ * support
+ */
+#ifdef CONFIG_SPE
+#define CPU_FTR_SPE_COMP CPU_FTR_SPE
+#define PPC_FEATURE_HAS_SPE_COMP PPC_FEATURE_HAS_SPE
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP PPC_FEATURE_HAS_EFP_SINGLE
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP PPC_FEATURE_HAS_EFP_DOUBLE
+#else
+#define CPU_FTR_SPE_COMP 0
+#define PPC_FEATURE_HAS_SPE_COMP 0
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP 0
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
+#endif
+
+/* We only set the TM feature if the kernel was compiled with TM support */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#define PPC_FEATURE2_HTM_NOSC_COMP PPC_FEATURE2_HTM_NOSC
+#else
+#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
+#define PPC_FEATURE2_HTM_NOSC_COMP 0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we have a
+ * 74[45]x and an MPC107 host bridge. 83xx and PowerQUICC II also
+ * require it for PCI "streaming/prefetch" to work properly, as does
+ * the 52xx family.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
+ || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
+ || defined(CONFIG_PPC_MPC52xx)
+#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON 0
+#endif
+
+/* The powersave features NAP & DOZE seem to confuse the BDI when
+ * debugging, so if a BDI is used, disable these.
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE 0
+#define CPU_FTR_MAYBE_CAN_NAP 0
+#endif
+
+#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
+#define CPU_FTRS_750 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
+#define CPU_FTRS_750CL (CPU_FTRS_750)
+#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
+#define CPU_FTRS_750GX (CPU_FTRS_750FX)
+#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+ CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
+ CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
+ CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
+ CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
+ CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_COMMON | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON)
+#define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_40X (CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X (CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6 (CPU_FTR_NOEXECUTE | \
+ CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X (CPU_FTRS_440x6)
+#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E500MC ( \
+ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+/*
+ * e5500/e6500 erratum A-006958 is a timebase bug that can use the
+ * same workaround as CPU_FTR_CELL_TB_BUG.
+ */
+#define CPU_FTRS_E5500 ( \
+ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
+#define CPU_FTRS_E6500 ( \
+ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
+
+/* 64-bit CPUs */
+#define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
+ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
+#define CPU_FTRS_POWER5 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
+#define CPU_FTRS_POWER6 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
+#define CPU_FTRS_POWER7 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | \
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
+#define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP )
+#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_P9_TLBIE_STQ_BUG | \
+ CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
+#define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG)
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \
+ CPU_FTR_P9_RADIX_PREFETCH_BUG | \
+ CPU_FTR_POWER9_DD2_1)
+#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+ CPU_FTR_P9_TM_HV_ASSIST | \
+ CPU_FTR_P9_TM_XER_SO_BUG)
+#define CPU_FTRS_POWER9_DD2_3 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+ CPU_FTR_P9_TM_HV_ASSIST | \
+ CPU_FTR_P9_TM_XER_SO_BUG | \
+ CPU_FTR_DAWR)
+#define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+ CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
+ CPU_FTR_DAWR | CPU_FTR_DAWR1)
+#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
+#define CPU_FTRS_PA6T (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
+#define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2)
+
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500)
+#else
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define CPU_FTRS_POSSIBLE \
+ (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
+#else
+#define CPU_FTRS_POSSIBLE \
+ (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
+ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
+ CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+#endif
+#else
+enum {
+ CPU_FTRS_POSSIBLE =
+#ifdef CONFIG_PPC_BOOK3S_604
+ CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+ CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+ CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+ CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+ CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+ CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+ CPU_FTRS_7447 | CPU_FTRS_7447A |
+ CPU_FTRS_CLASSIC32 |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_603
+ CPU_FTRS_603 | CPU_FTRS_82XX |
+ CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
+#endif
+#ifdef CONFIG_PPC_8xx
+ CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_PPC_47x
+ CPU_FTRS_47X | CPU_FTR_476_DD2 |
+#elif defined(CONFIG_44x)
+ CPU_FTRS_44X | CPU_FTRS_440x6 |
+#endif
+#ifdef CONFIG_PPC_E500
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+#endif
+ 0,
+};
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
+#else
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+#define CPU_FTRS_DT_CPU_BASE \
+ (CPU_FTR_LWSYNC | \
+ CPU_FTR_FPU_UNAVAILABLE | \
+ CPU_FTR_NOEXECUTE | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_206 | \
+ CPU_FTR_ARCH_207S)
+#else
+#define CPU_FTRS_DT_CPU_BASE (~0ul)
+#endif
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define CPU_FTRS_ALWAYS \
+ (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
+ CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
+#else
+#define CPU_FTRS_ALWAYS \
+ (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
+ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
+ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
+ ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+#endif
+#else
+enum {
+ CPU_FTRS_ALWAYS =
+#ifdef CONFIG_PPC_BOOK3S_604
+ CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+ CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+ CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+ CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+ CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+ CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+ CPU_FTRS_7447 & CPU_FTRS_7447A &
+ CPU_FTRS_CLASSIC32 &
+#endif
+#ifdef CONFIG_PPC_BOOK3S_603
+ CPU_FTRS_603 & CPU_FTRS_82XX &
+ CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
+#endif
+#ifdef CONFIG_PPC_8xx
+ CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_PPC_47x
+ CPU_FTRS_47X &
+#elif defined(CONFIG_44x)
+ CPU_FTRS_44X & CPU_FTRS_440x6 &
+#endif
+#ifdef CONFIG_PPC_E500
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+#endif
+ ~CPU_FTR_EMB_HV & /* can be removed at runtime */
+ CPU_FTRS_POSSIBLE,
+};
+#endif /* CONFIG_PPC64 */
+
+/*
+ * Maximum number of hw breakpoints supported on powerpc. The number
+ * supported by the actual hw might be less than this; that is decided
+ * at run time in nr_wp_slots().
+ */
+#define HBP_NUM_MAX 2
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
new file mode 100644
index 000000000..f26c430f3
--- /dev/null
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CPUTHREADS_H
+#define _ASM_POWERPC_CPUTHREADS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * Mapping of threads to cores
+ *
+ * Note: This implementation is limited to a power-of-2 number of
+ * threads per core, with the same number for each core in the system
+ * (though it would work if some processors had fewer threads, as long
+ * as the CPU numbers are still allocated, just not brought online).
+ *
+ * However, the API allows for a different implementation in the future
+ * if needed, as long as you only use the functions and not the variables
+ * directly.
+ */
+
+#ifdef CONFIG_SMP
+extern int threads_per_core;
+extern int threads_per_subcore;
+extern int threads_shift;
+extern bool has_big_cores;
+extern cpumask_t threads_core_mask;
+#else
+#define threads_per_core 1
+#define threads_per_subcore 1
+#define threads_shift 0
+#define has_big_cores 0
+#define threads_core_mask (*get_cpu_mask(0))
+#endif
+
+static inline int cpu_nr_cores(void)
+{
+ return nr_cpu_ids >> threads_shift;
+}
+
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
+
+static inline int cpu_thread_in_core(int cpu)
+{
+ return cpu & (threads_per_core - 1);
+}
+
+static inline int cpu_thread_in_subcore(int cpu)
+{
+ return cpu & (threads_per_subcore - 1);
+}
+
+static inline int cpu_first_thread_sibling(int cpu)
+{
+ return cpu & ~(threads_per_core - 1);
+}
+
+static inline int cpu_last_thread_sibling(int cpu)
+{
+ return cpu | (threads_per_core - 1);
+}
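+
+/* Worked example, assuming threads_per_core == 8: CPU 13 is thread
+ * cpu_thread_in_core(13) == 5 of its core, and its sibling range is
+ * cpu_first_thread_sibling(13) == 8 .. cpu_last_thread_sibling(13) == 15.
+ */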
+
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu & ~0x6; /* Big Core */
+ else
+ return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu | 0x6; /* Big Core */
+ else
+ return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return 2; /* Big Core */
+ else
+ return 1;
+}
+
+static inline u32 get_tensr(void)
+{
+#ifdef CONFIG_BOOKE
+ if (cpu_has_feature(CPU_FTR_SMT))
+ return mfspr(SPRN_TENSR);
+#endif
+ return 1;
+}
+
+void book3e_start_thread(int thread, unsigned long addr);
+void book3e_stop_thread(int thread);
+
+#endif /* __ASSEMBLY__ */
+
+#define INVALID_THREAD_HWID 0x0fff
+
+#endif /* _ASM_POWERPC_CPUTHREADS_H */
+
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
new file mode 100644
index 000000000..431ae2343
--- /dev/null
+++ b/arch/powerpc/include/asm/cputime.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
+ * the same units as the timebase. Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+#include <asm/firmware.h>
+
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
+
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
+#ifdef __KERNEL__
+/*
+ * Convert cputime <-> microseconds
+ */
+extern u64 __cputime_usec_factor;
+
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
+{
+ return mulhdu((__force u64) ct, __cputime_usec_factor);
+}
+
+#define cputime_to_nsecs(cputime) tb_to_ns((__force u64)cputime)
+
+/*
+ * PPC64 uses PACA which is task independent for storing accounting data while
+ * PPC32 uses struct thread_info, therefore at task switch the accounting data
+ * has to be populated in the new task
+ */
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk) (&get_paca()->accounting)
+#define raw_get_accounting(tsk) (&local_paca->accounting)
+static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+
+#else
+#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#define raw_get_accounting(tsk) get_accounting(tsk)
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+static inline void arch_vtime_task_switch(struct task_struct *prev)
+{
+ struct cpu_accounting_data *acct = get_accounting(current);
+ struct cpu_accounting_data *acct0 = get_accounting(prev);
+
+ acct->starttime = acct0->starttime;
+}
+#endif
+
+/*
+ * account_cpu_user_entry/exit run "unreconciled", so they can't trace
+ * and can't use get_paca().
+ */
+static notrace inline void account_cpu_user_entry(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->utime += (tb - acct->starttime_user);
+ acct->starttime = tb;
+}
+
+static notrace inline void account_cpu_user_exit(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->stime += (tb - acct->starttime);
+ acct->starttime_user = tb;
+}
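+
+/* These are paired around kernel entry from and exit to userspace; a
+ * sketch of the call sites (not a literal quote of the interrupt code):
+ *
+ *	on interrupt from user:	account_cpu_user_entry();
+ *	on return to user:	account_cpu_user_exit();
+ */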
+
+static notrace inline void account_stolen_time(void)
+{
+#ifdef CONFIG_PPC_SPLPAR
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ struct lppaca *lp = local_paca->lppaca_ptr;
+
+ if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+ pseries_accumulate_stolen_time();
+ }
+#endif
+}
+
+#endif /* __KERNEL__ */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void account_cpu_user_entry(void)
+{
+}
+static inline void account_cpu_user_exit(void)
+{
+}
+static notrace inline void account_stolen_time(void)
+{
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/crashdump-ppc64.h b/arch/powerpc/include/asm/crashdump-ppc64.h
new file mode 100644
index 000000000..68d9717cc
--- /dev/null
+++ b/arch/powerpc/include/asm/crashdump-ppc64.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_CRASHDUMP_PPC64_H
+#define _ASM_POWERPC_CRASHDUMP_PPC64_H
+
+/*
+ * Backup region - first 64KB of System RAM
+ *
+ * If ever the below macros are to be changed, please be judicious.
+ * The implicit assumptions are:
+ * - start, end & size are less than UINT32_MAX.
+ * - start & size are at least 8 byte aligned.
+ *
+ * For implementation details: arch/powerpc/purgatory/trampoline_64.S
+ */
+#define BACKUP_SRC_START 0
+#define BACKUP_SRC_END 0xffff
+#define BACKUP_SRC_SIZE (BACKUP_SRC_END - BACKUP_SRC_START + 1)
+
+#endif /* __ASM_POWERPC_CRASHDUMP_PPC64_H */
diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h
new file mode 100644
index 000000000..bbfb94800
--- /dev/null
+++ b/arch/powerpc/include/asm/current.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_CURRENT_H
+#define _ASM_POWERPC_CURRENT_H
+#ifdef __KERNEL__
+
+struct task_struct;
+
+#ifdef __powerpc64__
+#include <linux/stddef.h>
+#include <asm/paca.h>
+
+static inline struct task_struct *get_current(void)
+{
+ struct task_struct *task;
+
+ /* get_current can be cached by the compiler, so no volatile */
+ asm ("ld %0,%1(13)"
+ : "=r" (task)
+ : "i" (offsetof(struct paca_struct, __current)));
+
+ return task;
+}
+#define current get_current()
+
+#else
+
+/*
+ * We keep `current' in r2 for speed.
+ */
+register struct task_struct *current asm ("r2");
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CURRENT_H */
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
new file mode 100644
index 000000000..4785c1716
--- /dev/null
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#ifdef __KERNEL__
+#ifndef _ASM_DBDMA_H_
+#define _ASM_DBDMA_H_
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+struct dbdma_regs {
+ unsigned int control; /* lets you change bits in status */
+ unsigned int status; /* DMA and device status bits (see below) */
+ unsigned int cmdptr_hi; /* upper 32 bits of command address */
+ unsigned int cmdptr; /* (lower 32 bits of) command address (phys) */
+ unsigned int intr_sel; /* select interrupt condition bit */
+ unsigned int br_sel; /* select branch condition bit */
+ unsigned int wait_sel; /* select wait condition bit */
+ unsigned int xfer_mode;
+ unsigned int data2ptr_hi;
+ unsigned int data2ptr;
+ unsigned int res1;
+ unsigned int address_hi;
+ unsigned int br_addr_hi;
+ unsigned int res2[3];
+};
+
+/* Bits in control and status registers */
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+struct dbdma_cmd {
+ __le16 req_count; /* requested byte transfer count */
+ __le16 command; /* command word (has bit-fields) */
+ __le32 phy_addr; /* physical data address */
+ __le32 cmd_dep; /* command-dependent field */
+ __le16 res_count; /* residual count after completion */
+ __le16 xfer_status; /* transfer status */
+};
+
+/* DBDMA command values in command field */
+#define OUTPUT_MORE 0 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+#define KEY_STREAM0 0 /* usual data stream */
+#define KEY_STREAM1 0x100 /* control/status stream */
+#define KEY_STREAM2 0x200 /* device-dependent stream */
+#define KEY_STREAM3 0x300 /* device-dependent stream */
+#define KEY_REGS 0x500 /* device register space */
+#define KEY_SYSTEM 0x600 /* system memory-mapped space */
+#define KEY_DEVICE 0x700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+#define INTR_NEVER 0 /* don't interrupt */
+#define INTR_IFSET 0x10 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x20 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x30 /* always interrupt */
+
+/* Branch control values in command field */
+#define BR_NEVER 0 /* don't branch */
+#define BR_IFSET 0x4 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x8 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0xc /* always branch */
+
+/* Wait control values in command field */
+#define WAIT_NEVER 0 /* don't wait */
+#define WAIT_IFSET 1 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 2 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 3 /* always wait */
+
+/* Align an address for a DBDMA command structure */
+#define DBDMA_ALIGN(x) (((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \
+ & -sizeof(struct dbdma_cmd))
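+
+/* Minimal sketch of building a two-entry command list: one INPUT_LAST
+ * transfer that interrupts on completion, followed by a STOP. buf is a
+ * bus address the caller already mapped; real drivers add branch/wait
+ * keys as required:
+ *
+ *	static void dbdma_fill_input(struct dbdma_cmd *cp, u32 buf, u16 len)
+ *	{
+ *		cp->req_count = cpu_to_le16(len);
+ *		cp->command = cpu_to_le16(INPUT_LAST | INTR_ALWAYS);
+ *		cp->phy_addr = cpu_to_le32(buf);
+ *		cp->xfer_status = 0;
+ *		(cp + 1)->command = cpu_to_le16(DBDMA_STOP);
+ *	}
+ */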
+
+/* Useful macros */
+#define DBDMA_DO_STOP(regs) do { \
+ out_le32(&((regs)->control), (RUN|FLUSH)<<16); \
+ while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH)) \
+ ; \
+} while(0)
+
+#define DBDMA_DO_RESET(regs) do { \
+ out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\
+ while(in_le32(&((regs)->status)) & (RUN)) \
+ ; \
+} while(0)
+
+#endif /* _ASM_DBDMA_H_ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
new file mode 100644
index 000000000..3e9da22a2
--- /dev/null
+++ b/arch/powerpc/include/asm/dbell.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * Provides masks and opcode images for use by code generation and
+ * emulation, and for instructions that older assemblers might not
+ * know about.
+ */
+#ifndef _ASM_POWERPC_DBELL_H
+#define _ASM_POWERPC_DBELL_H
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/cputhreads.h>
+#include <asm/ppc-opcode.h>
+#include <asm/feature-fixups.h>
+#include <asm/kvm_ppc.h>
+
+#define PPC_DBELL_MSG_BRDCAST (0x04000000)
+#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x) ((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK 0x3fff
+enum ppc_dbell {
+ PPC_DBELL = 0, /* doorbell */
+ PPC_DBELL_CRIT = 1, /* critical doorbell */
+ PPC_G_DBELL = 2, /* guest doorbell */
+ PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */
+ PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
+ PPC_DBELL_SERVER = 5, /* doorbell on server */
+};
+
+#ifdef CONFIG_PPC_BOOK3S
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSND(%1), PPC_MSGSNDP(%1), %0)
+ : : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+ /* sync is not required when taking messages from the same core */
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSYNC " ; lwsync", "", %0)
+ : : "i" (CPU_FTR_HVMODE|CPU_FTR_ARCH_300));
+}
+
+static inline void _ppc_msgclr(u32 msg)
+{
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGCLR(%1), PPC_MSGCLRP(%1), %0)
+ : : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+static inline void ppc_msgclr(enum ppc_dbell type)
+{
+ u32 msg = PPC_DBELL_TYPE(type);
+
+ _ppc_msgclr(msg);
+}
+
+#else /* CONFIG_PPC_BOOK3S */
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+}
+
+#endif /* CONFIG_PPC_BOOK3S */
+
+extern void doorbell_exception(struct pt_regs *regs);
+
+static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
+{
+ u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
+ (tag & 0x07ffffff);
+
+ _ppc_msgsnd(msg);
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Doorbells must only be used if CPU_FTR_DBELL is available.
+ * msgsnd is used in HV, and msgsndp is used in !HV.
+ *
+ * These should be used by platform code that is aware of restrictions.
+ * Other arch code should use ->cause_ipi.
+ *
+ * doorbell_global_ipi() sends a dbell to any target CPU.
+ * Must be used only by architectures that address msgsnd target
+ * by PIR/get_hard_smp_processor_id.
+ */
+static inline void doorbell_global_ipi(int cpu)
+{
+ u32 tag = get_hard_smp_processor_id(cpu);
+
+ kvmppc_set_host_ipi(cpu);
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
+}
+
+/*
+ * doorbell_core_ipi() sends a dbell to a target CPU in the same core.
+ * Must be used only by architectures that address msgsnd target
+ * by TIR/cpu_thread_in_core.
+ */
+static inline void doorbell_core_ipi(int cpu)
+{
+ u32 tag = cpu_thread_in_core(cpu);
+
+ kvmppc_set_host_ipi(cpu);
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
+}
+
+/*
+ * Attempt to cause a core doorbell if destination is on the same core.
+ * Returns 1 on success, 0 on failure.
+ */
+static inline int doorbell_try_core_ipi(int cpu)
+{
+ int this_cpu = get_cpu();
+ int ret = 0;
+
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
+ doorbell_core_ipi(cpu);
+ ret = 1;
+ }
+
+ put_cpu();
+
+ return ret;
+}
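+
+/* A platform's IPI path might try the cheap core doorbell first and fall
+ * back to its interrupt controller; ic_cause_ipi() below is a placeholder
+ * for the platform's own method:
+ *
+ *	if (!doorbell_try_core_ipi(cpu))
+ *		ic_cause_ipi(cpu);
+ */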
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_POWERPC_DBELL_H */
diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h
new file mode 100644
index 000000000..099c28dd4
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-generic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ */
+
+#ifndef _ASM_POWERPC_DCR_GENERIC_H
+#define _ASM_POWERPC_DCR_GENERIC_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID};
+
+typedef struct {
+ enum host_type_t type;
+ union {
+ dcr_host_mmio_t mmio;
+ dcr_host_native_t native;
+ } host;
+} dcr_host_t;
+
+extern bool dcr_map_ok_generic(dcr_host_t host);
+
+extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c);
+
+extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n);
+
+extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_GENERIC_H */
+
+
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
new file mode 100644
index 000000000..fc6d93ef4
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ */
+
+#ifndef _ASM_POWERPC_DCR_MMIO_H
+#define _ASM_POWERPC_DCR_MMIO_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+typedef struct {
+ void __iomem *token;
+ unsigned int stride;
+ unsigned int base;
+} dcr_host_mmio_t;
+
+static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host)
+{
+ return host.token != NULL;
+}
+
+extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c);
+
+static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n)
+{
+ return in_be32(host.token + ((host.base + dcr_n) * host.stride));
+}
+
+static inline void dcr_write_mmio(dcr_host_mmio_t host,
+ unsigned int dcr_n,
+ u32 value)
+{
+ out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_MMIO_H */
+
+
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
new file mode 100644
index 000000000..a92059964
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ */
+
+#ifndef _ASM_POWERPC_DCR_NATIVE_H
+#define _ASM_POWERPC_DCR_NATIVE_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/spinlock.h>
+#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
+#include <linux/stringify.h>
+
+typedef struct {
+ unsigned int base;
+} dcr_host_native_t;
+
+static inline bool dcr_map_ok_native(dcr_host_native_t host)
+{
+ return true;
+}
+
+#define dcr_map_native(dev, dcr_n, dcr_c) \
+ ((dcr_host_native_t){ .base = (dcr_n) })
+#define dcr_unmap_native(host, dcr_c) do {} while (0)
+#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
+#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
+
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction based accessors. We hand code
+ * the opcodes in order not to depend on newer binutils
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+ unsigned int ret;
+ asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+ : "=r" (ret) : "r" (reg));
+ return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+ asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+ : : "r" (val), "r" (reg));
+}
+
+#define mfdcr(rn) \
+ ({unsigned int rval; \
+ if (__builtin_constant_p(rn) && rn < 1024) \
+ asm volatile("mfdcr %0, %1" : "=r" (rval) \
+ : "n" (rn)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ rval = mfdcrx(rn); \
+ else \
+ rval = __mfdcr(rn); \
+ rval;})
+
+#define mtdcr(rn, v) \
+do { \
+ if (__builtin_constant_p(rn) && rn < 1024) \
+ asm volatile("mtdcr %0, %1" \
+ : : "n" (rn), "r" (v)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ mtdcrx(rn, v); \
+ else \
+ __mtdcr(rn, v); \
+} while (0)
+
+/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
+extern spinlock_t dcr_ind_lock;
+
+static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = mfdcrx(base_data);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = __mfdcr(base_data);
+ }
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+ return val;
+}
+
+static inline void __mtdcri(int base_addr, int base_data, int reg,
+ unsigned val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ __mtdcr(base_data, val);
+ }
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+static inline void __dcri_clrset(int base_addr, int base_data, int reg,
+ unsigned clr, unsigned set)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = (mfdcrx(base_data) & ~clr) | set;
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = (__mfdcr(base_data) & ~clr) | set;
+ __mtdcr(base_data, val);
+ }
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+#define mfdcri(base, reg) __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg)
+
+#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg, data)
+
+#define dcri_clrset(base, reg, clr, set) __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg, clr, set)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_NATIVE_H */
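
A short sketch of the indirect accessors defined above, assuming the DCRN_SDR0_CONFIG_ADDR/DATA pair and the SDR0_MFR bit definitions from <asm/dcr-regs.h> below:

/* read SDR0_MFR, then clear the 440GP IRQ-compat bit under dcr_ind_lock */
u32 mfr = mfdcri(SDR0, SDR0_MFR);
dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_PCM, 0);
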
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
new file mode 100644
index 000000000..5c1a4973f
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common DCR / SDR / CPR register definitions used on various IBM/AMCC
+ * 4xx processors
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ *
+ * Mostly lifted from asm-ppc/ibm4xx.h by
+ *
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *
+ */
+
+#ifndef __DCR_REGS_H__
+#define __DCR_REGS_H__
+
+/*
+ * Most DCRs used for controlling devices such as the MAL, DMA engine,
+ * etc... are obtained from the device tree.
+ *
+ * The definitions in this file are fixed DCRs and indirect DCRs that
+ * are commonly used outside of specific drivers, or that refer to core
+ * common registers that may occasionally have to be tweaked outside
+ * of the driver's main register set.
+ */
+
+/* CPRs (440GX and 440SP/440SPe) */
+#define DCRN_CPR0_CONFIG_ADDR 0xc
+#define DCRN_CPR0_CONFIG_DATA 0xd
+
+/* SDRs (440GX and 440SP/440SPe) */
+#define DCRN_SDR0_CONFIG_ADDR 0xe
+#define DCRN_SDR0_CONFIG_DATA 0xf
+
+#define SDR0_PFC0 0x4100
+#define SDR0_PFC1 0x4101
+#define SDR0_PFC1_EPS 0x1c00000
+#define SDR0_PFC1_EPS_SHIFT 22
+#define SDR0_PFC1_RMII 0x02000000
+#define SDR0_MFR 0x4300
+#define SDR0_MFR_TAH0 0x80000000 /* TAHOE0 Enable */
+#define SDR0_MFR_TAH1 0x40000000 /* TAHOE1 Enable */
+#define SDR0_MFR_PCM 0x10000000 /* PPC440GP irq compat mode */
+#define SDR0_MFR_ECS 0x08000000 /* EMAC int clk */
+#define SDR0_MFR_T0TXFL 0x00080000
+#define SDR0_MFR_T0TXFH 0x00040000
+#define SDR0_MFR_T1TXFL 0x00020000
+#define SDR0_MFR_T1TXFH 0x00010000
+#define SDR0_MFR_E0TXFL 0x00008000
+#define SDR0_MFR_E0TXFH 0x00004000
+#define SDR0_MFR_E0RXFL 0x00002000
+#define SDR0_MFR_E0RXFH 0x00001000
+#define SDR0_MFR_E1TXFL 0x00000800
+#define SDR0_MFR_E1TXFH 0x00000400
+#define SDR0_MFR_E1RXFL 0x00000200
+#define SDR0_MFR_E1RXFH 0x00000100
+#define SDR0_MFR_E2TXFL 0x00000080
+#define SDR0_MFR_E2TXFH 0x00000040
+#define SDR0_MFR_E2RXFL 0x00000020
+#define SDR0_MFR_E2RXFH 0x00000010
+#define SDR0_MFR_E3TXFL 0x00000008
+#define SDR0_MFR_E3TXFH 0x00000004
+#define SDR0_MFR_E3RXFL 0x00000002
+#define SDR0_MFR_E3RXFH 0x00000001
+#define SDR0_UART0 0x0120
+#define SDR0_UART1 0x0121
+#define SDR0_UART2 0x0122
+#define SDR0_UART3 0x0123
+#define SDR0_CUST0 0x4000
+
+/* SDR for 405EZ */
+#define DCRN_SDR_ICINTSTAT 0x4510
+#define ICINTSTAT_ICRX 0x80000000
+#define ICINTSTAT_ICTX0 0x40000000
+#define ICINTSTAT_ICTX1 0x20000000
+#define ICINTSTAT_ICTX 0x60000000
+
+/* SDRs (460EX/460GT) */
+#define SDR0_ETH_CFG 0x4103
+#define SDR0_ETH_CFG_ECS 0x00000100 /* EMAC int clk source */
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_SRAM0_SB0CR 0x00
+#define DCRN_SRAM0_SB1CR 0x01
+#define DCRN_SRAM0_SB2CR 0x02
+#define DCRN_SRAM0_SB3CR 0x03
+#define SRAM_SBCR_BU_MASK 0x00000180
+#define SRAM_SBCR_BS_64KB 0x00000800
+#define SRAM_SBCR_BU_RO 0x00000080
+#define SRAM_SBCR_BU_RW 0x00000180
+#define DCRN_SRAM0_BEAR 0x04
+#define DCRN_SRAM0_BESR0 0x05
+#define DCRN_SRAM0_BESR1 0x06
+#define DCRN_SRAM0_PMEG 0x07
+#define DCRN_SRAM0_CID 0x08
+#define DCRN_SRAM0_REVID 0x09
+#define DCRN_SRAM0_DPC 0x0a
+#define SRAM_DPC_ENABLE 0x80000000
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the SRAM0 controller (e.g. 0x30 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_L2C0_CFG 0x00
+#define L2C_CFG_L2M 0x80000000
+#define L2C_CFG_ICU 0x40000000
+#define L2C_CFG_DCU 0x20000000
+#define L2C_CFG_DCW_MASK 0x1e000000
+#define L2C_CFG_TPC 0x01000000
+#define L2C_CFG_CPC 0x00800000
+#define L2C_CFG_FRAN 0x00200000
+#define L2C_CFG_SS_MASK 0x00180000
+#define L2C_CFG_SS_256 0x00000000
+#define L2C_CFG_CPIM 0x00040000
+#define L2C_CFG_TPIM 0x00020000
+#define L2C_CFG_LIM 0x00010000
+#define L2C_CFG_PMUX_MASK 0x00007000
+#define L2C_CFG_PMUX_SNP 0x00000000
+#define L2C_CFG_PMUX_IF 0x00001000
+#define L2C_CFG_PMUX_DF 0x00002000
+#define L2C_CFG_PMUX_DS 0x00003000
+#define L2C_CFG_PMIM 0x00000800
+#define L2C_CFG_TPEI 0x00000400
+#define L2C_CFG_CPEI 0x00000200
+#define L2C_CFG_NAM 0x00000100
+#define L2C_CFG_SMCM 0x00000080
+#define L2C_CFG_NBRM 0x00000040
+#define L2C_CFG_RDBW 0x00000008 /* only 460EX/GT */
+#define DCRN_L2C0_CMD 0x01
+#define L2C_CMD_CLR 0x80000000
+#define L2C_CMD_DIAG 0x40000000
+#define L2C_CMD_INV 0x20000000
+#define L2C_CMD_CCP 0x10000000
+#define L2C_CMD_CTE 0x08000000
+#define L2C_CMD_STRC 0x04000000
+#define L2C_CMD_STPC 0x02000000
+#define L2C_CMD_RPMC 0x01000000
+#define L2C_CMD_HCC 0x00800000
+#define DCRN_L2C0_ADDR 0x02
+#define DCRN_L2C0_DATA 0x03
+#define DCRN_L2C0_SR 0x04
+#define L2C_SR_CC 0x80000000
+#define L2C_SR_CPE 0x40000000
+#define L2C_SR_TPE 0x20000000
+#define L2C_SR_LRU 0x10000000
+#define L2C_SR_PCS 0x08000000
+#define DCRN_L2C0_REVID 0x05
+#define DCRN_L2C0_SNP0 0x06
+#define DCRN_L2C0_SNP1 0x07
+#define L2C_SNP_BA_MASK 0xffff0000
+#define L2C_SNP_SSR_MASK 0x0000f000
+#define L2C_SNP_SSR_32G 0x0000f000
+#define L2C_SNP_ESR 0x00000800
+
+/*
+ * DCR register offsets for 440SP/440SPe I2O/DMA controller.
+ * The base address is configured in the device tree.
+ */
+#define DCRN_I2O0_IBAL 0x006
+#define DCRN_I2O0_IBAH 0x007
+#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */
+
+/* 440SP/440SPe Software Reset DCR */
+#define DCRN_SDR0_SRST 0x0200
+#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */
+
+/* 440SP/440SPe Memory Queue DCR offsets */
+#define DCRN_MQ0_XORBA 0x04
+#define DCRN_MQ0_CF2H 0x06
+#define DCRN_MQ0_CFBHL 0x0f
+#define DCRN_MQ0_BAUH 0x10
+
+/* HB/LL Paths Configuration Register */
+#define MQ0_CFBHL_TPLM 28
+#define MQ0_CFBHL_HBCL 23
+#define MQ0_CFBHL_POLY 15
+
+#endif /* __DCR_REGS_H__ */
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
new file mode 100644
index 000000000..64030e3a1
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ */
+
+#ifndef _ASM_POWERPC_DCR_H
+#define _ASM_POWERPC_DCR_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_DCR
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+#include <asm/dcr-native.h>
+#endif
+
+#ifdef CONFIG_PPC_DCR_MMIO
+#include <asm/dcr-mmio.h>
+#endif
+
+
+/* Indirection layer for providing both NATIVE and MMIO support. */
+
+#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
+
+#include <asm/dcr-generic.h>
+
+#define DCR_MAP_OK(host) dcr_map_ok_generic(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value)
+
+#else
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+typedef dcr_host_native_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_native(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value)
+#else
+typedef dcr_host_mmio_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_mmio(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
+#endif
+
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
+
+/*
+ * Additional helpers to read the DCR base from the device tree.
+ */
+struct device_node;
+extern unsigned int dcr_resource_start(const struct device_node *np,
+ unsigned int index);
+extern unsigned int dcr_resource_len(const struct device_node *np,
+ unsigned int index);
+#endif /* CONFIG_PPC_DCR */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_H */
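
The usual mapping pattern, sketched under the assumption of a driver probe context with a struct device_node *np in hand; the accessors resolve to the native or MMIO variants at build time:

unsigned int base = dcr_resource_start(np, 0);
unsigned int len = dcr_resource_len(np, 0);
dcr_host_t host = dcr_map(np, base, len);

if (!DCR_MAP_OK(host))
	return -ENODEV;

u32 val = dcr_read(host, 0);	/* first register in the window */
dcr_write(host, 0, val | 0x1);	/* hypothetical enable bit */
dcr_unmap(host, len);
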
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
new file mode 100644
index 000000000..86a14736c
--- /dev/null
+++ b/arch/powerpc/include/asm/debug.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_DEBUG_H
+#define _ASM_POWERPC_DEBUG_H
+
+#include <asm/hw_breakpoint.h>
+
+struct pt_regs;
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_break_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+ if (unlikely(__ ## __NAME)) \
+ return __ ## __NAME(regs); \
+ return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_break_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk);
+bool ppc_breakpoint_available(void);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+extern void do_send_trap(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code, int brkpt);
+#endif
+
+#endif /* _ASM_POWERPC_DEBUG_H */
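
A hedged sketch of how a debugger such as xmon hooks in: it simply assigns the function pointers, after which traps funnel through the inline wrappers above. example_bpt() is a hypothetical handler:

static int example_bpt(struct pt_regs *regs)
{
	/* return non-zero if the breakpoint was handled here */
	return 0;
}

static void example_install_debugger(void)
{
	__debugger_bpt = example_bpt;
}
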
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
new file mode 100644
index 000000000..51bb8c147
--- /dev/null
+++ b/arch/powerpc/include/asm/delay.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_DELAY_H
+#define _ASM_POWERPC_DELAY_H
+#ifdef __KERNEL__
+
+#include <linux/processor.h>
+#include <asm/time.h>
+
+/*
+ * Copyright 1996, Paul Mackerras.
+ * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
+ * Anton Blanchard.
+ */
+
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
+
+/*
+ * On shared processor machines the generic implementation of mdelay can
+ * result in large errors. While each iteration of the loop inside mdelay
+ * is supposed to take 1ms, the hypervisor could sleep our partition for
+ * longer (eg 10ms). With the right timing these errors can add up.
+ *
+ * Since there is no 32bit overflow issue on 64bit kernels, just call
+ * udelay directly.
+ */
+#ifdef CONFIG_PPC64
+#define mdelay(n) udelay((n) * 1000)
+#endif
+
+/**
+ * spin_event_timeout - spin until a condition gets true or a timeout elapses
+ * @condition: a C expression to evaluate
+ * @timeout: timeout, in microseconds
+ * @delay: the number of microseconds to delay between each evaluation of
+ * @condition
+ *
+ * The process spins until the condition evaluates to true (non-zero) or the
+ * timeout elapses. The return value of this macro is the value of
+ * @condition when the loop terminates. This allows you to determine why
+ * the loop terminated. If the return value is zero, then you know a
+ * timeout has occurred.
+ *
+ * The primary purpose of this macro is to poll on a hardware register
+ * until a status bit changes. The timeout ensures that the loop still
+ * terminates even if the bit never changes. The delay is for devices that
+ * need a delay in between successive reads.
+ *
+ * gcc will optimize out the if-statement if @delay is a constant.
+ */
+#define spin_event_timeout(condition, timeout, delay) \
+({ \
+ typeof(condition) __ret; \
+ unsigned long __loops = tb_ticks_per_usec * timeout; \
+ unsigned long __start = mftb(); \
+ \
+ if (delay) { \
+ while (!(__ret = (condition)) && \
+ (tb_ticks_since(__start) <= __loops)) \
+ udelay(delay); \
+ } else { \
+ spin_begin(); \
+ while (!(__ret = (condition)) && \
+ (tb_ticks_since(__start) <= __loops)) \
+ spin_cpu_relax(); \
+ spin_end(); \
+ } \
+ if (!__ret) \
+ __ret = (condition); \
+ __ret; \
+})
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DELAY_H */
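
A typical use of spin_event_timeout(), sketched with an assumed ioremap'd register pointer "regs" and a hypothetical STATUS_READY bit: wait up to 1000us for the bit to come up, re-reading every 10us:

u32 up = spin_event_timeout(in_be32(regs) & STATUS_READY, 1000, 10);
if (!up)
	return -ETIMEDOUT;	/* condition still false after 1000us */
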
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
new file mode 100644
index 000000000..47ed639f3
--- /dev/null
+++ b/arch/powerpc/include/asm/device.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arch specific extensions to struct device
+ */
+#ifndef _ASM_POWERPC_DEVICE_H
+#define _ASM_POWERPC_DEVICE_H
+
+struct device_node;
+#ifdef CONFIG_PPC64
+struct pci_dn;
+struct iommu_table;
+#endif
+
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
+struct dev_archdata {
+ /*
+ * These two used to be a union. However, with the hybrid ops we need
+ * both, so here we store both a DMA offset for direct mappings and
+ * an iommu_table for remapped DMA.
+ */
+ dma_addr_t dma_offset;
+
+#ifdef CONFIG_PPC64
+ struct iommu_table *iommu_table_base;
+#endif
+
+#ifdef CONFIG_PPC64
+ struct pci_dn *pci_data;
+#endif
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev;
+#endif
+#ifdef CONFIG_FAIL_IOMMU
+ int fail_iommu;
+#endif
+#ifdef CONFIG_CXL_BASE
+ struct cxl_context *cxl_ctx;
+#endif
+#ifdef CONFIG_PCI_IOV
+ void *iov_data;
+#endif
+};
+
+struct pdev_archdata {
+ u64 dma_mask;
+ /*
+ * Pointer to nvdimm_pmu structure, to handle the unregistering
+ * of pmu device
+ */
+ void *priv;
+};
+
+#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
new file mode 100644
index 000000000..8d2ebc36d
--- /dev/null
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_PPC_DISASSEMBLE_H__
+#define __ASM_PPC_DISASSEMBLE_H__
+
+#include <linux/types.h>
+
+static inline unsigned int get_op(u32 inst)
+{
+ return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+ return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_tmrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+ return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+ return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+ return inst & 0xffff;
+}
+
+static inline unsigned int get_oc(u32 inst)
+{
+ return (inst >> 11) & 0x7fff;
+}
+
+static inline unsigned int get_tx_or_sx(u32 inst)
+{
+ return (inst) & 0x1;
+}
+
+#define IS_XFORM(inst) (get_op(inst) == 31)
+#define IS_DSFORM(inst) (get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+ unsigned dsisr;
+
+
+ /* bits 6:15 --> 22:31 */
+ dsisr = (instr & 0x03ff0000) >> 16;
+
+ if (IS_XFORM(instr)) {
+ /* bits 29:30 --> 15:16 */
+ dsisr |= (instr & 0x00000006) << 14;
+ /* bit 25 --> 17 */
+ dsisr |= (instr & 0x00000040) << 8;
+ /* bits 21:24 --> 18:21 */
+ dsisr |= (instr & 0x00000780) << 3;
+ } else {
+ /* bit 5 --> 17 */
+ dsisr |= (instr & 0x04000000) >> 12;
+ /* bits 1: 4 --> 18:21 */
+ dsisr |= (instr & 0x78000000) >> 17;
+ /* bits 30:31 --> 12:13 */
+ if (IS_DSFORM(instr))
+ dsisr |= (instr & 0x00000003) << 18;
+ }
+
+ return dsisr;
+}
+#endif /* __ASM_PPC_DISASSEMBLE_H__ */
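
A worked example of the field extractors: "lwz r3, 8(r1)" encodes to 0x80610008 (opcode 32 << 26 | RT 3 << 21 | RA 1 << 16 | D 8), so:

u32 inst = 0x80610008;
unsigned int op = get_op(inst);	/* 32, primary opcode of lwz */
unsigned int rt = get_rt(inst);	/* 3, destination register */
unsigned int ra = get_ra(inst);	/* 1, base register */
unsigned int d  = get_d(inst);	/* 8, raw 16-bit displacement field */
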
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
new file mode 100644
index 000000000..128304cbe
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_POWERPC_DMA_DIRECT_H
+#define ASM_POWERPC_DMA_DIRECT_H 1
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + dev->archdata.dma_offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - dev->archdata.dma_offset;
+}
+#endif /* ASM_POWERPC_DMA_DIRECT_H */
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
new file mode 100644
index 000000000..d97c66d9a
--- /dev/null
+++ b/arch/powerpc/include/asm/dma.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+#ifdef __KERNEL__
+
+/*
+ * Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * Changes for ppc sound by Christoph Nadig
+ */
+
+/*
+ * Note: Adapted for PowerPC by Gary Thomas
+ * Modified by Cort Dougan <cort@cs.nmt.edu>
+ *
+ * None of this really applies for Power Macintoshes. There is
+ * basically just enough here to get kernel/dma.c to compile.
+ */
+
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#ifndef MAX_DMA_CHANNELS
+#define MAX_DMA_CHANNELS 8
+#endif
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS (~0UL)
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
+#define DMA_LO_PAGE_1 0x83
+#define DMA_LO_PAGE_2 0x81
+#define DMA_LO_PAGE_3 0x82
+#define DMA_LO_PAGE_5 0x8B
+#define DMA_LO_PAGE_6 0x89
+#define DMA_LO_PAGE_7 0x8A
+
+#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
+#define DMA_HI_PAGE_1 0x483
+#define DMA_HI_PAGE_2 0x481
+#define DMA_HI_PAGE_3 0x482
+#define DMA_HI_PAGE_5 0x48B
+#define DMA_HI_PAGE_6 0x489
+#define DMA_HI_PAGE_7 0x48A
+
+#define DMA1_EXT_REG 0x40B
+#define DMA2_EXT_REG 0x4D6
+
+#ifndef __powerpc64__
+ /* in arch/powerpc/kernel/setup_32.c -- Cort */
+ extern unsigned int DMA_MODE_WRITE;
+ extern unsigned int DMA_MODE_READ;
+#else
+ #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#endif
+
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ unsigned char ucDmaCmd = 0x00;
+
+ if (dmanr != 4) {
+ dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
+ dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */
+ }
+ if (dmanr <= 3) {
+ dma_outb(dmanr, DMA1_MASK_REG);
+ dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
+ } else {
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+ }
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr <= 3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
+{
+ switch (dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_LO_PAGE_0);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_LO_PAGE_1);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_LO_PAGE_2);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_LO_PAGE_3);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
+ break;
+ }
+}
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
+{
+ if (dmanr <= 3) {
+ dma_outb(phys & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ dma_outb((phys >> 8) & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ } else {
+ dma_outb((phys >> 1) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((phys >> 9) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ }
+ set_dma_page(dmanr, phys >> 16);
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb(count & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ } else {
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr <= 3)
+ ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+ : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr <= 3) ? count : (count << 1);
+}
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DMA_H */
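
The canonical 8237 programming sequence implied by the helpers above, sketched with assumed chan/addr/count values; the lock must be held across the flip-flop-sensitive register writes:

unsigned long flags = claim_dma_lock();

disable_dma(chan);
clear_dma_ff(chan);
set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
set_dma_addr(chan, addr);		/* physical address */
set_dma_count(chan, count);		/* bytes; must be even for 5-7 */
enable_dma(chan);

release_dma_lock(flags);
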
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
new file mode 100644
index 000000000..13bf6dee8
--- /dev/null
+++ b/arch/powerpc/include/asm/drmem.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * drmem.h: Power specific logical memory block representation
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_LMB_H
+#define _ASM_POWERPC_LMB_H
+
+#include <linux/sched.h>
+
+struct drmem_lmb {
+ u64 base_addr;
+ u32 drc_index;
+ u32 aa_index;
+ u32 flags;
+};
+
+struct drmem_lmb_info {
+ struct drmem_lmb *lmbs;
+ int n_lmbs;
+ u64 lmb_size;
+};
+
+struct device_node;
+struct property;
+
+extern struct drmem_lmb_info *drmem_info;
+
+static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
+ const struct drmem_lmb *start)
+{
+ /*
+ * DLPAR code paths can take several milliseconds per element
+ * when interacting with firmware. Ensure that we don't
+ * unfairly monopolize the CPU.
+ */
+ if (((++lmb - start) % 16) == 0)
+ cond_resched();
+
+ return lmb;
+}
+
+#define for_each_drmem_lmb_in_range(lmb, start, end) \
+ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
+
+#define for_each_drmem_lmb(lmb) \
+ for_each_drmem_lmb_in_range((lmb), \
+ &drmem_info->lmbs[0], \
+ &drmem_info->lmbs[drmem_info->n_lmbs])
+
+/*
+ * The of_drconf_cell_v1 struct defines the layout of the LMB data
+ * specified in the ibm,dynamic-memory device tree property.
+ * The property itself is a 32-bit value specifying the number of
+ * LMBs followed by an array of of_drconf_cell_v1 entries, one
+ * per LMB.
+ */
+struct of_drconf_cell_v1 {
+ __be64 base_addr;
+ __be32 drc_index;
+ __be32 reserved;
+ __be32 aa_index;
+ __be32 flags;
+};
+
+/*
+ * Version 2 of the ibm,dynamic-memory property is defined as a
+ * 32-bit value specifying the number of LMB sets followed by an
+ * array of of_drconf_cell_v2 entries, one per LMB set.
+ */
+struct of_drconf_cell_v2 {
+ u32 seq_lmbs;
+ u64 base_addr;
+ u32 drc_index;
+ u32 aa_index;
+ u32 flags;
+} __packed;
+
+#define DRCONF_MEM_ASSIGNED 0x00000008
+#define DRCONF_MEM_AI_INVALID 0x00000040
+#define DRCONF_MEM_RESERVED 0x00000080
+#define DRCONF_MEM_HOTREMOVABLE 0x00000100
+
+static inline u64 drmem_lmb_size(void)
+{
+ return drmem_info->lmb_size;
+}
+
+#define DRMEM_LMB_RESERVED 0x80000000
+
+static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
+{
+ lmb->flags |= DRMEM_LMB_RESERVED;
+}
+
+static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
+{
+ lmb->flags &= ~DRMEM_LMB_RESERVED;
+}
+
+static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
+{
+ return lmb->flags & DRMEM_LMB_RESERVED;
+}
+
+u64 drmem_lmb_memory_max(void);
+int walk_drmem_lmbs(struct device_node *dn, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *));
+int drmem_update_dt(void);
+
+#ifdef CONFIG_PPC_PSERIES
+int __init
+walk_drmem_lmbs_early(unsigned long node, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *));
+void drmem_update_lmbs(struct property *prop);
+#endif
+
+static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
+{
+ lmb->aa_index = 0xffffffff;
+}
+
+#endif /* _ASM_POWERPC_LMB_H */
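
A small sketch of the iterator: count the LMBs that firmware has assigned, relying on drmem_lmb_next()'s built-in cond_resched() throttling:

struct drmem_lmb *lmb;
int assigned = 0;

for_each_drmem_lmb(lmb) {
	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		assigned++;
}
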
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 000000000..0c729e2d0
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ * Copyright 2017, IBM Corporation
+ * cpufeatures is the new way to discover CPU features via the
+ * /cpus/features device tree node. This supersedes PVR-based discovery
+ * ("cputable") and older device tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
new file mode 100644
index 000000000..4bcb9f9ac
--- /dev/null
+++ b/arch/powerpc/include/asm/dtl.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_POWERPC_DTL_H
+#define _ASM_POWERPC_DTL_H
+
+#include <asm/lppaca.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Layout of entries in the hypervisor's dispatch trace log buffer.
+ */
+struct dtl_entry {
+ u8 dispatch_reason;
+ u8 preempt_reason;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
+};
+
+#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
+#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+
+/*
+ * Dispatch trace log event enable mask:
+ * 0x1: voluntary virtual processor waits
+ * 0x2: time-slice preempts
+ * 0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE 0x1
+#define DTL_LOG_PREEMPT 0x2
+#define DTL_LOG_FAULT 0x4
+#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
+extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
+
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif /* _ASM_POWERPC_DTL_H */
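
A compile-time sanity check one might add (hand-computed: 2*u8 + __be16 + 3*__be32 + 4*__be64 = 48 bytes, so a 4 KiB buffer holds 85 entries with 16 bytes of slack):

static_assert(sizeof(struct dtl_entry) == 48);
static_assert(N_DISPATCH_LOG == 85);
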
diff --git a/arch/powerpc/include/asm/edac.h b/arch/powerpc/include/asm/edac.h
new file mode 100644
index 000000000..5571e23d2
--- /dev/null
+++ b/arch/powerpc/include/asm/edac.h
@@ -0,0 +1,40 @@
+/*
+ * PPC EDAC common defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch edac_atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static __inline__ void edac_atomic_scrub(void *va, u32 size)
+{
+ unsigned int *virt_addr = va;
+ unsigned int temp;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__ ("\n\
+ 1: lwarx %0,0,%1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b\n\
+ isync"
+ : "=&r"(temp)
+ : "r"(virt_addr)
+ : "cr0", "memory");
+ }
+}
+
+#endif
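
A sketch of how the EDAC core is expected to call this hook, assuming a struct page *page that reported a correctable error:

edac_atomic_scrub(page_address(page), PAGE_SIZE);
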
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
new file mode 100644
index 000000000..514dd056c
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
+ * Copyright 2001-2012 IBM Corporation.
+ */
+
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+
+#include <uapi/asm/eeh.h>
+
+struct pci_dev;
+struct pci_bus;
+struct pci_dn;
+
+#ifdef CONFIG_EEH
+
+/* EEH subsystem flags */
+#define EEH_ENABLED 0x01 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
+#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+
+/*
+ * Delay for PE reset, all in ms
+ *
+ * The PCI specification requires a reset hold time of 100 milliseconds;
+ * we use 250 milliseconds here. The PCI bus settlement time is
+ * specified as 1.5 seconds; we use 1.8 seconds.
+ */
+#define EEH_PE_RST_HOLD_TIME 250
+#define EEH_PE_RST_SETTLE_TIME 1800
+
+/*
+ * This struct is used to trace PE-related EEH functionality.
+ * In theory, one instance of the struct is created for each
+ * particular PE. In nature, PEs correlate to each other, and the
+ * struct has to reflect that hierarchy in order to easily pick up
+ * the affected PEs when one particular PE has EEH errors.
+ *
+ * Also, one particular PE might be composed of a PCI device, a PCI
+ * bus and its subordinate components. The struct also needs to carry
+ * that information. Furthermore, one particular PE is only meaningful
+ * within the corresponding PHB. Therefore, the root PEs should be
+ * created against the existing PHBs in a one-to-one fashion.
+ */
+#define EEH_PE_INVALID (1 << 0) /* Invalid */
+#define EEH_PE_PHB (1 << 1) /* PHB PE */
+#define EEH_PE_DEVICE (1 << 2) /* Device PE */
+#define EEH_PE_BUS (1 << 3) /* Bus PE */
+#define EEH_PE_VF (1 << 4) /* VF PE */
+
+#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
+#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
+#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */
+#define EEH_PE_RESET (1 << 3) /* PE reset in progress */
+
+#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
+#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
+#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
+
+struct eeh_pe {
+ int type; /* PE type: PHB/Bus/Device */
+ int state; /* PE EEH dependent mode */
+ int addr; /* PE configuration address */
+ struct pci_controller *phb; /* Associated PHB */
+ struct pci_bus *bus; /* Top PCI bus for bus PE */
+ int check_count; /* Times of ignored error */
+ int freeze_count; /* Times of froze up */
+ time64_t tstamp; /* Time on first-time freeze */
+ int false_positives; /* Times of reported #ff's */
+ atomic_t pass_dev_cnt; /* Count of passed through devs */
+ struct eeh_pe *parent; /* Parent PE */
+ void *data; /* PE auxiliary data */
+ struct list_head child_list; /* List of PEs below this PE */
+ struct list_head child; /* Memb. child_list/eeh_phb_pe */
+ struct list_head edevs; /* List of eeh_dev in this PE */
+
+#ifdef CONFIG_STACKTRACE
+ /*
+ * Saved stack trace. When we find a PE freeze in eeh_dev_check_failure
+ * the stack trace is saved here so we can print it in the recovery
+ * thread if it turns out to be due to a real problem rather than
+ * a hot-remove.
+ *
+ * A max of 64 entries might be overkill, but it also might not be.
+ */
+ unsigned long stack_trace[64];
+ int trace_entries;
+#endif /* CONFIG_STACKTRACE */
+};
+
+#define eeh_pe_for_each_dev(pe, edev, tmp) \
+ list_for_each_entry_safe(edev, tmp, &pe->edevs, entry)
+
+#define eeh_for_each_pe(root, pe) \
+ for (pe = root; pe; pe = eeh_pe_next(pe, root))
+
+static inline bool eeh_pe_passed(struct eeh_pe *pe)
+{
+ return pe ? !!atomic_read(&pe->pass_dev_cnt) : false;
+}
+
+/*
+ * The struct is used to trace EEH state for the associated
+ * PCI device node or PCI device. In future, it might represent
+ * a PE as well, so that the EEH devices form another tree
+ * alongside the currently existing tree of PCI buses and PCI
+ * devices.
+ */
+#define EEH_DEV_BRIDGE (1 << 0) /* PCI bridge */
+#define EEH_DEV_ROOT_PORT (1 << 1) /* PCIe root port */
+#define EEH_DEV_DS_PORT (1 << 2) /* Downstream port */
+#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
+#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
+
+#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */
+#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */
+#define EEH_DEV_REMOVED (1 << 10) /* Removed permanently */
+
+struct eeh_dev {
+ int mode; /* EEH mode */
+ int bdfn; /* bdfn of device (for cfg ops) */
+ struct pci_controller *controller;
+ int pe_config_addr; /* PE config address */
+ u32 config_space[16]; /* Saved PCI config space */
+ int pcix_cap; /* Saved PCIx capability */
+ int pcie_cap; /* Saved PCIe capability */
+ int aer_cap; /* Saved AER capability */
+ int af_cap; /* Saved AF capability */
+ struct eeh_pe *pe; /* Associated PE */
+ struct list_head entry; /* Membership in eeh_pe.edevs */
+ struct list_head rmv_entry; /* Membership in rmv_list */
+ struct pci_dn *pdn; /* Associated PCI device node */
+ struct pci_dev *pdev; /* Associated PCI device */
+ bool in_error; /* Error flag for edev */
+
+ /* VF specific properties */
+ struct pci_dev *physfn; /* Associated SRIOV PF */
+ int vf_index; /* Index of this VF */
+};
+
+/* "fmt" must be a simple literal string */
+#define EEH_EDEV_PRINT(level, edev, fmt, ...) \
+ pr_##level("PCI %04x:%02x:%02x.%x#%04x: EEH: " fmt, \
+ (edev)->controller->global_number, PCI_BUSNO((edev)->bdfn), \
+ PCI_SLOT((edev)->bdfn), PCI_FUNC((edev)->bdfn), \
+ ((edev)->pe ? (edev)->pe_config_addr : 0xffff), ##__VA_ARGS__)
+#define eeh_edev_dbg(edev, fmt, ...) EEH_EDEV_PRINT(debug, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_info(edev, fmt, ...) EEH_EDEV_PRINT(info, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_warn(edev, fmt, ...) EEH_EDEV_PRINT(warn, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_err(edev, fmt, ...) EEH_EDEV_PRINT(err, (edev), fmt, ##__VA_ARGS__)
+
+static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
+{
+ return edev ? edev->pdn : NULL;
+}
+
+static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
+{
+ return edev ? edev->pdev : NULL;
+}
+
+static inline struct eeh_pe *eeh_dev_to_pe(struct eeh_dev* edev)
+{
+ return edev ? edev->pe : NULL;
+}
+
+/* Return values from eeh_ops::next_error */
+enum {
+ EEH_NEXT_ERR_NONE = 0,
+ EEH_NEXT_ERR_INF,
+ EEH_NEXT_ERR_FROZEN_PE,
+ EEH_NEXT_ERR_FENCED_PHB,
+ EEH_NEXT_ERR_DEAD_PHB,
+ EEH_NEXT_ERR_DEAD_IOC
+};
+
+/*
+ * The struct is used to trace the registered EEH operation
+ * callback functions. Those operation callback functions are
+ * heavily platform dependent, which means the platform should
+ * register its own EEH operation callbacks before performing any
+ * further EEH operations.
+ */
+#define EEH_OPT_DISABLE 0 /* EEH disable */
+#define EEH_OPT_ENABLE 1 /* EEH enable */
+#define EEH_OPT_THAW_MMIO 2 /* MMIO enable */
+#define EEH_OPT_THAW_DMA 3 /* DMA enable */
+#define EEH_OPT_FREEZE_PE 4 /* Freeze PE */
+#define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */
+#define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */
+#define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */
+#define EEH_STATE_MMIO_ACTIVE (1 << 3) /* Active MMIO */
+#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
+#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
+#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
+#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
+#define EEH_RESET_HOT 1 /* Hot reset */
+#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
+#define EEH_LOG_TEMP 1 /* EEH temporary error log */
+#define EEH_LOG_PERM 2 /* EEH permanent error log */
+
+struct eeh_ops {
+ char *name;
+ struct eeh_dev *(*probe)(struct pci_dev *pdev);
+ int (*set_option)(struct eeh_pe *pe, int option);
+ int (*get_state)(struct eeh_pe *pe, int *delay);
+ int (*reset)(struct eeh_pe *pe, int option);
+ int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
+ int (*configure_bridge)(struct eeh_pe *pe);
+ int (*err_inject)(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask);
+ int (*read_config)(struct eeh_dev *edev, int where, int size, u32 *val);
+ int (*write_config)(struct eeh_dev *edev, int where, int size, u32 val);
+ int (*next_error)(struct eeh_pe **pe);
+ int (*restore_config)(struct eeh_dev *edev);
+ int (*notify_resume)(struct eeh_dev *edev);
+};
+
+extern int eeh_subsystem_flags;
+extern u32 eeh_max_freezes;
+extern bool eeh_debugfs_no_recover;
+extern struct eeh_ops *eeh_ops;
+extern raw_spinlock_t confirm_error_lock;
+
+static inline void eeh_add_flag(int flag)
+{
+ eeh_subsystem_flags |= flag;
+}
+
+static inline void eeh_clear_flag(int flag)
+{
+ eeh_subsystem_flags &= ~flag;
+}
+
+static inline bool eeh_has_flag(int flag)
+{
+ return !!(eeh_subsystem_flags & flag);
+}
+
+static inline bool eeh_enabled(void)
+{
+ return eeh_has_flag(EEH_ENABLED) && !eeh_has_flag(EEH_FORCE_DISABLED);
+}
+
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
+static inline bool eeh_state_active(int state)
+{
+ return (state & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
+ == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+}
+
+typedef void (*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
+typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag);
+void eeh_set_pe_aux_size(int size);
+int eeh_phb_pe_create(struct pci_controller *phb);
+int eeh_wait_state(struct eeh_pe *pe, int max_wait);
+struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
+struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root);
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no);
+int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent);
+int eeh_pe_tree_remove(struct eeh_dev *edev);
+void eeh_pe_update_time_stamp(struct eeh_pe *pe);
+void *eeh_pe_traverse(struct eeh_pe *root,
+ eeh_pe_traverse_func fn, void *flag);
+void eeh_pe_dev_traverse(struct eeh_pe *root,
+ eeh_edev_traverse_func fn, void *flag);
+void eeh_pe_restore_bars(struct eeh_pe *pe);
+const char *eeh_pe_loc_get(struct eeh_pe *pe);
+struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
+
+void eeh_show_enabled(void);
+int __init eeh_init(struct eeh_ops *ops);
+int eeh_check_failure(const volatile void __iomem *token);
+int eeh_dev_check_failure(struct eeh_dev *edev);
+void eeh_addr_cache_init(void);
+void eeh_probe_device(struct pci_dev *pdev);
+void eeh_remove_device(struct pci_dev *);
+int eeh_unfreeze_pe(struct eeh_pe *pe);
+int eeh_pe_reset_and_recover(struct eeh_pe *pe);
+int eeh_dev_open(struct pci_dev *pdev);
+void eeh_dev_release(struct pci_dev *pdev);
+struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group);
+int eeh_pe_set_option(struct eeh_pe *pe, int option);
+int eeh_pe_get_state(struct eeh_pe *pe);
+int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed);
+int eeh_pe_configure(struct eeh_pe *pe);
+int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask);
+
+/**
+ * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
+ *
+ * If this macro yields TRUE, the caller relays to eeh_check_failure()
+ * which does further tests out of line.
+ */
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
+
+/*
+ * Reads from a device which has been isolated by EEH will return
+ * all 1s. This macro gives an all-1s value of the given size (in
+ * bytes: 1, 2, or 4) for comparing with the result of a read.
+ */
+#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
+
+#else /* !CONFIG_EEH */
+
+static inline bool eeh_enabled(void)
+{
+ return false;
+}
+
+static inline void eeh_show_enabled(void) { }
+
+static inline int eeh_check_failure(const volatile void __iomem *token)
+{
+ return 0;
+}
+
+#define eeh_dev_check_failure(x) (0)
+
+static inline void eeh_addr_cache_init(void) { }
+
+static inline void eeh_probe_device(struct pci_dev *dev) { }
+
+static inline void eeh_remove_device(struct pci_dev *dev) { }
+
+#define EEH_POSSIBLE_ERROR(val, type) (0)
+#define EEH_IO_ERROR_VALUE(size) (-1UL)
+static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; }
+#endif /* CONFIG_EEH */
+
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
+void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
+#endif
+
+#ifdef CONFIG_PPC64
+/*
+ * MMIO read/write operations with EEH support.
+ */
+static inline u8 eeh_readb(const volatile void __iomem *addr)
+{
+ u8 val = in_8(addr);
+ if (EEH_POSSIBLE_ERROR(val, u8))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u16 eeh_readw(const volatile void __iomem *addr)
+{
+ u16 val = in_le16(addr);
+ if (EEH_POSSIBLE_ERROR(val, u16))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u32 eeh_readl(const volatile void __iomem *addr)
+{
+ u32 val = in_le32(addr);
+ if (EEH_POSSIBLE_ERROR(val, u32))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u64 eeh_readq(const volatile void __iomem *addr)
+{
+ u64 val = in_le64(addr);
+ if (EEH_POSSIBLE_ERROR(val, u64))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
+{
+ u16 val = in_be16(addr);
+ if (EEH_POSSIBLE_ERROR(val, u16))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
+{
+ u32 val = in_be32(addr);
+ if (EEH_POSSIBLE_ERROR(val, u32))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
+{
+ u64 val = in_be64(addr);
+ if (EEH_POSSIBLE_ERROR(val, u64))
+ eeh_check_failure(addr);
+ return val;
+}
+
+static inline void eeh_memcpy_fromio(void *dest, const
+ volatile void __iomem *src,
+ unsigned long n)
+{
+ _memcpy_fromio(dest, src, n);
+
+ /* Look for ffff's here at dest[n]. Assume that at least 4 bytes
+ * were copied. Check all four bytes.
+ */
+ if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+ eeh_check_failure(src);
+}
+
+/* in-string eeh macros */
+static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
+ int ns)
+{
+ _insb(addr, buf, ns);
+ if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
+ eeh_check_failure(addr);
+}
+
+static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
+ int ns)
+{
+ _insw(addr, buf, ns);
+ if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
+ eeh_check_failure(addr);
+}
+
+static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
+ int nl)
+{
+ _insl(addr, buf, nl);
+ if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
+ eeh_check_failure(addr);
+}
+
+
+void __init eeh_cache_debugfs_init(void);
+
+#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_EEH_H */
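
The detection pattern the read helpers above encode, sketched with an assumed ioremap'd pointer "mmio": a device isolated by EEH returns all 1s, which makes the helper take the slow-path check:

u32 val = eeh_readl(mmio);	/* calls eeh_check_failure() on all-1s */
if (val == EEH_IO_ERROR_VALUE(4))
	pr_warn("possible EEH freeze detected\n");
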
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
new file mode 100644
index 000000000..dadde7d52
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#ifndef ASM_POWERPC_EEH_EVENT_H
+#define ASM_POWERPC_EEH_EVENT_H
+#ifdef __KERNEL__
+
+/*
+ * Structure holding PCI controller data that describes a
+ * change in the isolation status of a PCI slot. A pointer
+ * to this struct is passed as the data pointer in a notify
+ * callback.
+ */
+struct eeh_event {
+ struct list_head list; /* to form event queue */
+ struct eeh_pe *pe; /* EEH PE */
+};
+
+int eeh_event_init(void);
+int eeh_send_failure_event(struct eeh_pe *pe);
+int __eeh_send_failure_event(struct eeh_pe *pe);
+void eeh_remove_event(struct eeh_pe *pe, bool force);
+void eeh_handle_normal_event(struct eeh_pe *pe);
+void eeh_handle_special_event(void);
+
+#endif /* __KERNEL__ */
+#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 000000000..dc7d48e3e
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
+/*
+ * EHV_PIC private definitions and structure.
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __EHV_PIC_H__
+#define __EHV_PIC_H__
+
+#include <linux/irq.h>
+
+#define NR_EHV_PIC_INTS 1024
+
+#define EHV_PIC_INFO(name) EHV_PIC_##name
+
+#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
+#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
+#define EHV_PIC_VECPRI_SENSE_EDGE 0
+#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
+#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
+#define EHV_PIC_VECPRI_SENSE_MASK 0x2
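+
+/*
+ * For example, an active-high, level-sensitive source would be encoded as
+ * (EHV_PIC_VECPRI_POLARITY_POSITIVE | EHV_PIC_VECPRI_SENSE_LEVEL) == 0x3.
+ */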
+
+struct ehv_pic {
+ /* The remapper for this EHV_PIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* core int flag */
+ int coreint_flag;
+};
+
+void ehv_pic_init(void);
+unsigned int ehv_pic_get_irq(void);
+
+#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
new file mode 100644
index 000000000..79f1c480b
--- /dev/null
+++ b/arch/powerpc/include/asm/elf.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ELF register definitions..
+ */
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
+
+#include <linux/sched.h> /* for task_struct */
+#include <asm/page.h>
+#include <asm/string.h>
+#include <uapi/asm/elf.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)
+
+#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
+/*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+ * process or 64 bit, on either a 64 bit or 32 bit kernel.
+ *
+ * This macro relies on elf_regs[i] having the right type to truncate to,
+ * either u32 or u64. It defines the body of the elf_core_copy_regs
+ * function, either the native one with elf_gregset_t elf_regs or
+ * the 32-bit one with elf_gregset_t32 elf_regs.
+ */
+#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \
+ int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \
+ (size_t)ELF_NGREG); \
+ for (i = 0; i < nregs; i++) \
+ elf_regs[i] = ((unsigned long *) regs)[i]; \
+ memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0]))
+
+/* Common routine for both 32-bit and 64-bit native processes */
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
+ struct pt_regs *regs)
+{
+ PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
+}
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
+
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
+   instruction set this CPU supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2)
+
+/* This yields a string that ld.so will use to load
+   implementation-specific libraries for optimization. This is more
+   specific in intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (cur_cpu_spec->platform)
+
+/* While ELF_PLATFORM indicates the ISA supported by the platform, it
+ * may not accurately reflect the underlying behavior of the hardware
+ * (as in the case of running in Power5+ compatibility mode on a
+ * Power6 machine). ELF_BASE_PLATFORM allows ld.so to load libraries
+ * that are tuned for the real hardware.
+ */
+#define ELF_BASE_PLATFORM (powerpc_base_platform)
+
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex) \
+do { \
+ if (((ex).e_flags & 0x3) == 2) \
+ set_thread_flag(TIF_ELF2ABI); \
+ else \
+ clear_thread_flag(TIF_ELF2ABI); \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_32BIT); \
+ else \
+ clear_thread_flag(TIF_32BIT); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+} while (0)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically. This
+ * is only required to work around bugs in old 32-bit toolchains. Since
+ * the 64-bit ABI has never had these issues, don't enable the workaround
+ * even if we have an executable stack.
+ */
+# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
+ (exec_stk == EXSTACK_DEFAULT) : 0)
+#else
+# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
+#endif /* __powerpc64__ */
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
+
+/* 1GB for 64-bit, 8MB for 32-bit */
+#define STACK_RND_MASK (is_32bit_task() ? \
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
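+
+/*
+ * Sanity check on the comment above: with 4K pages (PAGE_SHIFT == 12), a
+ * 32-bit task gets a mask of 0x7ff, i.e. 2^11 pages * 4K == 8MB of stack
+ * randomisation range, and a 64-bit task gets 0x3ffff, i.e. 2^18 pages
+ * * 4K == 1GB.
+ */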
+
+#ifdef CONFIG_SPU_BASE
+/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+#define NT_SPU 1
+
+#define ARCH_HAVE_EXTRA_ELF_NOTES
+
+#endif /* CONFIG_SPU_BASE */
+
+#ifdef CONFIG_PPC64
+
+#define get_cache_geometry(level) \
+ (ppc64_caches.level.assoc << 16 | ppc64_caches.level.line_size)
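+
+/*
+ * For example, an 8-way cache with 128-byte lines is encoded as
+ * (8 << 16) | 128 == 0x00080080.
+ */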
+
+#define ARCH_DLINFO_CACHE_GEOMETRY \
+ NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
+ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
+ NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
+ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
+ NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
+ NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, get_cache_geometry(l3))
+
+#else
+#define ARCH_DLINFO_CACHE_GEOMETRY
+#endif
+
+/*
+ * The requirements here are:
+ * - keep the final alignment of sp (sp & 0xf)
+ * - make sure the 32-bit value at the first 16 byte aligned position of
+ * AUXV is greater than 16 for glibc compatibility.
+ * AT_IGNOREPPC is used for that.
+ * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
+ * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
+ * - update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
+ */
+#define COMMON_ARCH_DLINFO \
+do { \
+ /* Handle glibc compatibility. */ \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ /* Cache size items */ \
+ NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
+ NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
+ NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
+ ARCH_DLINFO_CACHE_GEOMETRY; \
+} while (0)
+
+#define ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size()); \
+} while (0)
+
+#define COMPAT_ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size_compat()); \
+} while (0)
+
+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
+struct func_desc {
+ unsigned long addr;
+ unsigned long toc;
+ unsigned long env;
+};
+
+#endif /* _ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/include/asm/elfnote.h b/arch/powerpc/include/asm/elfnote.h
new file mode 100644
index 000000000..a201b6e9a
--- /dev/null
+++ b/arch/powerpc/include/asm/elfnote.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PowerPC ELF notes.
+ *
+ * Copyright 2019, IBM Corporation
+ */
+
+#ifndef __ASM_POWERPC_ELFNOTE_H__
+#define __ASM_POWERPC_ELFNOTE_H__
+
+/*
+ * These note types should live in a SHT_NOTE segment and have
+ * "PowerPC" in the name field.
+ */
+
+/*
+ * The capabilities supported/required by this kernel (bitmap).
+ *
+ * This type uses a bitmap as "desc" field. Each bit is described
+ * in arch/powerpc/kernel/note.S
+ */
+#define PPC_ELFNOTE_CAPABILITIES 1
+
+#endif /* __ASM_POWERPC_ELFNOTE_H__ */
diff --git a/arch/powerpc/include/asm/emergency-restart.h b/arch/powerpc/include/asm/emergency-restart.h
new file mode 100644
index 000000000..3711bd9d5
--- /dev/null
+++ b/arch/powerpc/include/asm/emergency-restart.h
@@ -0,0 +1 @@
+#include <asm-generic/emergency-restart.h>
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
new file mode 100644
index 000000000..800cb2100
--- /dev/null
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2007 Sony Corporation
+ */
+
+#ifndef _ASM_POWERPC_EMULATED_OPS_H
+#define _ASM_POWERPC_EMULATED_OPS_H
+
+#include <linux/atomic.h>
+#include <linux/perf_event.h>
+
+
+#ifdef CONFIG_PPC_EMULATED_STATS
+
+struct ppc_emulated_entry {
+ const char *name;
+ atomic_t val;
+};
+
+extern struct ppc_emulated {
+#ifdef CONFIG_ALTIVEC
+ struct ppc_emulated_entry altivec;
+#endif
+ struct ppc_emulated_entry dcba;
+ struct ppc_emulated_entry dcbz;
+ struct ppc_emulated_entry fp_pair;
+ struct ppc_emulated_entry isel;
+ struct ppc_emulated_entry mcrxr;
+ struct ppc_emulated_entry mfpvr;
+ struct ppc_emulated_entry multiple;
+ struct ppc_emulated_entry popcntb;
+ struct ppc_emulated_entry spe;
+ struct ppc_emulated_entry string;
+ struct ppc_emulated_entry sync;
+ struct ppc_emulated_entry unaligned;
+#ifdef CONFIG_MATH_EMULATION
+ struct ppc_emulated_entry math;
+#endif
+#ifdef CONFIG_VSX
+ struct ppc_emulated_entry vsx;
+#endif
+#ifdef CONFIG_PPC64
+ struct ppc_emulated_entry mfdscr;
+ struct ppc_emulated_entry mtdscr;
+ struct ppc_emulated_entry lq_stq;
+ struct ppc_emulated_entry lxvw4x;
+ struct ppc_emulated_entry lxvh8x;
+ struct ppc_emulated_entry lxvd2x;
+ struct ppc_emulated_entry lxvb16x;
+#endif
+} ppc_emulated;
+
+extern u32 ppc_warn_emulated;
+
+extern void ppc_warn_emulated_print(const char *type);
+
+#define __PPC_WARN_EMULATED(type) \
+ do { \
+ atomic_inc(&ppc_emulated.type.val); \
+ if (ppc_warn_emulated) \
+ ppc_warn_emulated_print(ppc_emulated.type.name); \
+ } while (0)
+
+#else /* !CONFIG_PPC_EMULATED_STATS */
+
+#define __PPC_WARN_EMULATED(type) do { } while (0)
+
+#endif /* !CONFIG_PPC_EMULATED_STATS */
+
+#define PPC_WARN_EMULATED(type, regs) \
+ do { \
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
+ 1, regs, 0); \
+ __PPC_WARN_EMULATED(type); \
+ } while (0)
+
+#define PPC_WARN_ALIGNMENT(type, regs) \
+ do { \
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
+ 1, regs, regs->dar); \
+ __PPC_WARN_EMULATED(type); \
+ } while (0)
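+
+/*
+ * A typical call site looks like the sketch below (isel is one of the
+ * entries in struct ppc_emulated above); it fires the
+ * PERF_COUNT_SW_EMULATION_FAULTS software event and, when stats are
+ * enabled, bumps ppc_emulated.isel.val:
+ *
+ *	PPC_WARN_EMULATED(isel, regs);
+ */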
+
+#endif /* _ASM_POWERPC_EMULATED_OPS_H */
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 000000000..cdf3c6df5
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,575 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* A "hypercall" is an "sc 1" instruction. This header file provides C
+ * wrapper functions for the ePAPR hypervisor interface.  It is intended
+ * for use by Linux device drivers and other operating systems.
+ *
+ * The hypercalls are implemented as inline assembly, rather than assembly
+ * language functions in a .S file, for optimization. It allows
+ * the caller to issue the hypercall instruction directly, improving both
+ * performance and memory footprint.
+ */
+
+#ifndef _EPAPR_HCALLS_H
+#define _EPAPR_HCALLS_H
+
+#include <uapi/asm/epapr_hcalls.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+/*
+ * Hypercall register clobber list
+ *
+ * These macros are used to define the list of clobbered registers during a
+ * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
+ * but the gcc inline assembly syntax does not allow us to specify registers
+ * on the clobber list that are also on the input/output list. Therefore,
+ * the lists of clobbered registers depends on the number of register
+ * parameters ("+r" and "=r") passed to the hypercall.
+ *
+ * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
+ * general rule, 'x' is the number of parameters passed to the assembly
+ * block *except* for r11.
+ *
+ * If you're not sure, just use the smallest value of 'x' that does not
+ * generate a compilation error. Because these are static inline functions,
+ * the compiler will only check the clobber list for a function if you
+ * compile code that calls that function.
+ *
+ * r3 and r11 are not included in any clobbers list because they are always
+ * listed as output registers.
+ *
+ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
+ * whether they will be clobbered.
+ *
+ * Note that r11 can be used as an output parameter.
+ *
+ * The "memory" clobber is only necessary for hcalls where the Hypervisor
+ * will read or write guest memory. However, we add it to all hcalls because
+ * the impact is minimal, and we want to ensure that it's present for the
+ * hcalls that need it.
+*/
+
+/* List of common clobbered registers. Do not use this macro. */
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
+
+#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
+#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
+#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
+#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
+#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
+#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
+#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
+#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
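+
+/*
+ * For example, ev_int_set_config() below passes four parameters in r3-r6
+ * (r11 carries the hcall token and is not counted), so it uses
+ * EV_HCALL_CLOBBERS4, which leaves r7-r10 on the clobber list.
+ */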
+
+extern bool epapr_paravirt_enabled;
+extern u32 epapr_hypercall_start[];
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * ev_int_set_config - configure the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: configuration for this interrupt
+ * @priority: interrupt priority
+ * @destination: destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_config(unsigned int interrupt,
+ uint32_t config, unsigned int priority, uint32_t destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
+ r3 = interrupt;
+ r4 = config;
+ r5 = priority;
+ r6 = destination;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_config - return the config of the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: returned configuration for this interrupt
+ * @priority: returned interrupt priority
+ * @destination: returned destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_config(unsigned int interrupt,
+ uint32_t *config, unsigned int *priority, uint32_t *destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ *config = r4;
+ *priority = r5;
+ *destination = r6;
+
+ return r3;
+}
+
+/**
+ * ev_int_set_mask - sets the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: 0=enable interrupts, 1=disable interrupts
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_mask(unsigned int interrupt,
+ unsigned int mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
+ r3 = interrupt;
+ r4 = mask;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_mask - returns the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_mask(unsigned int interrupt,
+ unsigned int *mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *mask = r4;
+
+ return r3;
+}
+
+/**
+ * ev_int_eoi - signal the end of interrupt processing
+ * @interrupt: the interrupt number
+ *
+ * This function signals the end of processing for the specified
+ * interrupt, which must be the interrupt currently in service. By
+ * definition, this is also the highest-priority interrupt.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_eoi(unsigned int interrupt)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_EOI);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_send - send characters to a byte stream
+ * @handle: byte stream handle
+ * @count: (input) num of chars to send, (output) num chars sent
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * @buffer must be at least 16 bytes long, because all 16 bytes will be
+ * read from memory into registers, even if count < 16.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_send(unsigned int handle,
+ unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ const uint32_t *p = (const uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
+ r3 = handle;
+ r4 = *count;
+ r5 = be32_to_cpu(p[0]);
+ r6 = be32_to_cpu(p[1]);
+ r7 = be32_to_cpu(p[2]);
+ r8 = be32_to_cpu(p[3]);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3),
+ "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+
+ return r3;
+}
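+
+/*
+ * A minimal usage sketch (the handle value is assumed to come from the
+ * hypervisor's device tree; it is not defined here):
+ *
+ *	char buf[EV_BYTE_CHANNEL_MAX_BYTES] = "hello";
+ *	unsigned int n = 5;
+ *	unsigned int rc = ev_byte_channel_send(handle, &n, buf);
+ *
+ * On success, n holds the number of characters actually sent.
+ */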
+
+/**
+ * ev_byte_channel_receive - fetch characters from a byte channel
+ * @handle: byte channel handle
+ * @count: (input) max num of chars to receive, (output) num chars received
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * The size of @buffer must be at least 16 bytes, even if you request fewer
+ * than 16 characters, because we always write 16 bytes to @buffer. This is
+ * for performance reasons.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_receive(unsigned int handle,
+ unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ uint32_t *p = (uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
+ r3 = handle;
+ r4 = *count;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4),
+ "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+ p[0] = cpu_to_be32(r5);
+ p[1] = cpu_to_be32(r6);
+ p[2] = cpu_to_be32(r7);
+ p[3] = cpu_to_be32(r8);
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_poll - returns the status of the byte channel buffers
+ * @handle: byte channel handle
+ * @rx_count: returned count of bytes in receive queue
+ * @tx_count: returned count of free space in transmit queue
+ *
+ * This function reports the amount of data in the receive queue (i.e. the
+ * number of bytes you can read), and the amount of free space in the transmit
+ * queue (i.e. the number of bytes you can write).
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_poll(unsigned int handle,
+ unsigned int *rx_count, unsigned int *tx_count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ *rx_count = r4;
+ *tx_count = r5;
+
+ return r3;
+}
+
+/**
+ * ev_int_iack - acknowledge an interrupt
+ * @handle: handle to the target interrupt controller
+ * @vector: returned interrupt vector
+ *
+ * If handle is zero, the function returns the next interrupt source
+ * number to be handled irrespective of the hierarchy or cascading
+ * of interrupt controllers. If non-zero, it specifies a handle to the
+ * interrupt controller that is the target of the acknowledge.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_iack(unsigned int handle,
+ unsigned int *vector)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_IACK);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *vector = r4;
+
+ return r3;
+}
+
+/**
+ * ev_doorbell_send - send a doorbell to another partition
+ * @handle: doorbell send handle
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_doorbell_send(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_idle -- wait for next interrupt on this core
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_idle(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_IDLE);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ register unsigned long r0 asm("r0");
+ register unsigned long r3 asm("r3") = in[0];
+ register unsigned long r4 asm("r4") = in[1];
+ register unsigned long r5 asm("r5") = in[2];
+ register unsigned long r6 asm("r6") = in[3];
+ register unsigned long r7 asm("r7") = in[4];
+ register unsigned long r8 asm("r8") = in[5];
+ register unsigned long r9 asm("r9") = in[6];
+ register unsigned long r10 asm("r10") = in[7];
+ register unsigned long r11 asm("r11") = nr;
+ register unsigned long r12 asm("r12");
+
+ asm volatile("bl epapr_hypercall_start"
+ : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+ "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+ "=r"(r12)
+ : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+ "r"(r9), "r"(r10), "r"(r11)
+ : "memory", "cc", "xer", "ctr", "lr");
+
+ out[0] = r4;
+ out[1] = r5;
+ out[2] = r6;
+ out[3] = r7;
+ out[4] = r8;
+ out[5] = r9;
+ out[6] = r10;
+ out[7] = r11;
+
+ return r3;
+}
+#else
+static inline unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+ unsigned long r;
+
+ r = epapr_hypercall(in, out, nr);
+ *r2 = out[0];
+
+ return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+
+ in[0] = p1;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ unsigned long in[8] = {0};
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ in[3] = p4;
+ return epapr_hypercall(in, out, nr);
+}
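+
+/*
+ * Illustrative note: the wrappers above take the full hypercall token, so
+ * an end-of-interrupt could equivalently be issued as
+ *
+ *	epapr_hypercall1(EV_HCALL_TOKEN(EV_INT_EOI), interrupt);
+ *
+ * mirroring the dedicated ev_int_eoi() wrapper earlier in this file.
+ */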
+#endif /* !__ASSEMBLY__ */
+#endif /* _EPAPR_HCALLS_H */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
new file mode 100644
index 000000000..b1ef1e92c
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for use by exception code on Book3-E
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ */
+#ifndef _ASM_POWERPC_EXCEPTION_64E_H
+#define _ASM_POWERPC_EXCEPTION_64E_H
+
+/*
+ * SPRG usage and other considerations...
+ *
+ * Since TLB miss and other standard exceptions can be interrupted by
+ * critical exceptions which can themselves be interrupted by machine
+ * checks, and since the latter two can themselves cause a TLB miss when
+ * hitting the linear mapping for the kernel stacks, we need to be a bit
+ * creative in how we use SPRGs.
+ *
+ * The basic idea is that we have one SPRG reserved for critical and one
+ * for machine check interrupts. Those are used to save a GPR that can
+ * then be used to get the PACA, and to store as much context as we need
+ * to save in there. That includes saving the SPRGs used by the TLB miss
+ * handler for linear mapping misses and the associated SRR0/1 due to
+ * the above re-entrancy issue.
+ *
+ * So here's the current usage pattern. It's done regardless of which
+ * SPRGs are user-readable though, thus we might have to change some of
+ * this later. In order to do that more easily, we use special constants
+ * for naming them.
+ *
+ * WARNING: Some of these SPRGs are user-readable. We need to do something
+ * about it at some point by making sure they can't be used to leak
+ * kernel-critical data.
+ */
+
+#define PACA_EXGDBELL PACA_EXGEN
+
+/* We are out of SPRGs so we save some things in the PACA. The normal
+ * exception frame is smaller than the CRIT or MC one though
+ */
+#define EX_R1 (0 * 8)
+#define EX_CR (1 * 8)
+#define EX_R10 (2 * 8)
+#define EX_R11 (3 * 8)
+#define EX_R14 (4 * 8)
+#define EX_R15 (5 * 8)
+
+/*
+ * The TLB miss exception uses different slots.
+ *
+ * The bolted variant uses only the first six fields,
+ * which in combination with pgd and kernel_pgd fits in
+ * one 64-byte cache line.
+ */
+
+#define EX_TLB_R10 ( 0 * 8)
+#define EX_TLB_R11 ( 1 * 8)
+#define EX_TLB_R14 ( 2 * 8)
+#define EX_TLB_R15 ( 3 * 8)
+#define EX_TLB_R16 ( 4 * 8)
+#define EX_TLB_CR ( 5 * 8)
+#define EX_TLB_R12 ( 6 * 8)
+#define EX_TLB_R13 ( 7 * 8)
+#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
+#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
+#define EX_TLB_SRR0 (10 * 8)
+#define EX_TLB_SRR1 (11 * 8)
+#define EX_TLB_R7 (12 * 8)
+#define EX_TLB_SIZE (13 * 8)
+
+#define START_EXCEPTION(label) \
+ .globl exc_##label##_book3e; \
+exc_##label##_book3e:
+
+/* TLB miss exception prolog
+ *
+ * This prolog handles re-entrancy (up to 3 levels supported in the PACA
+ * though we currently don't test for overflow). It provides you with a
+ * re-entrancy safe working space of r10...r16 and CR with r12 being used
+ * as the exception area pointer in the PACA for that level of re-entrancy
+ * and r13 containing the PACA pointer.
+ *
+ * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply
+ * as-is for instruction exceptions. It's up to the actual exception code
+ * to save them as well if required.
+ */
+#define TLB_MISS_PROLOG \
+ mtspr SPRN_SPRG_TLB_SCRATCH,r12; \
+ mfspr r12,SPRN_SPRG_TLB_EXFRAME; \
+ std r10,EX_TLB_R10(r12); \
+ mfcr r10; \
+ std r11,EX_TLB_R11(r12); \
+ mfspr r11,SPRN_SPRG_TLB_SCRATCH; \
+ std r13,EX_TLB_R13(r12); \
+ mfspr r13,SPRN_SPRG_PACA; \
+ std r14,EX_TLB_R14(r12); \
+ addi r14,r12,EX_TLB_SIZE; \
+ std r15,EX_TLB_R15(r12); \
+ mfspr r15,SPRN_SRR1; \
+ std r16,EX_TLB_R16(r12); \
+ mfspr r16,SPRN_SRR0; \
+ std r10,EX_TLB_CR(r12); \
+ std r11,EX_TLB_R12(r12); \
+ mtspr SPRN_SPRG_TLB_EXFRAME,r14; \
+ std r15,EX_TLB_SRR1(r12); \
+ std r16,EX_TLB_SRR0(r12);
+
+/* And these are the matching epilogs that restore things
+ *
+ * There are 3 epilogs:
+ *
+ * - SUCCESS       : Unwinds one level
+ * - ERROR         : restore from level 0 and reset
+ * - ERROR_SPECIAL : restore from current level and reset
+ *
+ * Normal errors use ERROR, that is, they restore the initial fault context
+ * and trigger a fault. However, there is a special case for linear mapping
+ * errors. Those should basically never happen, but if they do happen, we
+ * want the error to point out the context that did that linear mapping
+ * fault, not the initial level 0 (basically, we got a bogus PGF or something
+ * like that). For userland errors on the linear mapping, there is no
+ * difference since those are always level 0 anyway.
+ */
+
+#define TLB_MISS_RESTORE(freg) \
+ ld r14,EX_TLB_CR(r12); \
+ ld r10,EX_TLB_R10(r12); \
+ ld r15,EX_TLB_SRR0(r12); \
+ ld r16,EX_TLB_SRR1(r12); \
+ mtspr SPRN_SPRG_TLB_EXFRAME,freg; \
+ ld r11,EX_TLB_R11(r12); \
+ mtcr r14; \
+ ld r13,EX_TLB_R13(r12); \
+ ld r14,EX_TLB_R14(r12); \
+ mtspr SPRN_SRR0,r15; \
+ ld r15,EX_TLB_R15(r12); \
+ mtspr SPRN_SRR1,r16; \
+ ld r16,EX_TLB_R16(r12); \
+ ld r12,EX_TLB_R12(r12); \
+
+#define TLB_MISS_EPILOG_SUCCESS \
+ TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR \
+ addi r12,r13,PACA_EXTLB; \
+ TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR_SPECIAL \
+ addi r11,r13,PACA_EXTLB; \
+ TLB_MISS_RESTORE(r11)
+
+#ifndef __ASSEMBLY__
+extern unsigned int interrupt_base_book3e;
+#endif
+
+#define SET_IVOR(vector_number, vector_offset) \
+ LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+ ori r3,r3,vector_offset@l; \
+ mtspr SPRN_IVOR##vector_number,r3;
+/*
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
+ * synchronisation instructions.
+ */
+#define RFI_TO_KERNEL \
+ rfi
+
+#define RFI_TO_USER \
+ rfi
+
+#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
new file mode 100644
index 000000000..bb6f78fcf
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_EXCEPTION_H
+#define _ASM_POWERPC_EXCEPTION_H
+/*
+ * Extracted from head_64.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ * This file contains the low-level support and setup for the
+ * PowerPC-64 platform, including trap and interrupt dispatch.
+ */
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers. They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries and iSeries.
+ *
+ * We make as much of the exception code common between native
+ * exception handlers (including pSeries LPAR) and iSeries LPAR
+ * implementations as possible.
+ */
+#include <asm/feature-fixups.h>
+
+/* PACA save area size in u64 units (exgen, exmc, etc) */
+#define EX_SIZE 10
+
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
+
+/*
+ * maximum recursive depth of MCE exceptions
+ */
+#define MAX_MCE_DEPTH 4
+
+#ifdef __ASSEMBLY__
+
+#define STF_ENTRY_BARRIER_SLOT \
+ STF_ENTRY_BARRIER_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop
+
+#define STF_EXIT_BARRIER_SLOT \
+ STF_EXIT_BARRIER_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop; \
+ nop; \
+ nop; \
+ nop
+
+#define ENTRY_FLUSH_SLOT \
+ ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
+#define SCV_ENTRY_FLUSH_SLOT \
+ SCV_ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
+/*
+ * r10 must be free to use, r13 must be paca
+ */
+#define INTERRUPT_TO_KERNEL \
+ STF_ENTRY_BARRIER_SLOT; \
+ ENTRY_FLUSH_SLOT
+
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL \
+ STF_ENTRY_BARRIER_SLOT; \
+ SCV_ENTRY_FLUSH_SLOT
+
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ *
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which hrfid, rfid, and rfscv are) to support ARCH_HAS_MEMBARRIER_SYNC_CORE
+ * without additional synchronisation instructions.
+ *
+ * soft-masked interrupt replay does not include a context-synchronising rfid,
+ * but those always return to the kernel; the sync is only required when
+ * returning to user.
+ */
+#define RFI_FLUSH_SLOT \
+ RFI_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop
+
+#define RFI_TO_KERNEL \
+ rfid
+
+#define RFI_TO_USER \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define RFI_TO_GUEST \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define HRFI_TO_KERNEL \
+ hrfid
+
+#define HRFI_TO_USER \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_GUEST \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define RFSCV_TO_USER \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ RFSCV; \
+ b rfscv_flush_fallback
+
+#else /* __ASSEMBLY__ */
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
new file mode 100644
index 000000000..92cac4851
--- /dev/null
+++ b/arch/powerpc/include/asm/exec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_EXEC_H
+#define _ASM_POWERPC_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_POWERPC_EXEC_H */
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
new file mode 100644
index 000000000..26ce2e5c0
--- /dev/null
+++ b/arch/powerpc/include/asm/extable.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path. This means when everything is well, we don't even
+ * have to jump over them. Further, they do not intrude on our cache or TLB
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#ifndef __ASSEMBLY__
+
+struct exception_table_entry {
+ int insn;
+ int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#endif
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target) \
+ stringify_in_c(.section __ex_table,"a";)\
+ stringify_in_c(.balign 4;) \
+ stringify_in_c(.long (_fault) - . ;) \
+ stringify_in_c(.long (_target) - . ;) \
+ stringify_in_c(.previous)
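+
+/*
+ * A minimal usage sketch in inline assembly: if the load at label 1
+ * faults, execution continues at label 2 with no registers modified (a
+ * real caller would give val a fallback value on that path).
+ *
+ *	asm ("1:	lwz	%0,0(%1)\n"
+ *	     "2:\n"
+ *	     EX_TABLE(1b, 2b)
+ *	     : "=r" (val) : "r" (addr));
+ */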
+
+#endif
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
new file mode 100644
index 000000000..27f9e11ed
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Firmware-Assisted Dump internal code.
+ *
+ * Copyright 2011, Mahesh Salgaonkar, IBM Corporation.
+ * Copyright 2019, Hari Bathini, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_FADUMP_INTERNAL_H
+#define _ASM_POWERPC_FADUMP_INTERNAL_H
+
+/* Maximum number of memory regions kernel supports */
+#define FADUMP_MAX_MEM_REGS 128
+
+#ifndef CONFIG_PRESERVE_FA_DUMP
+
+/* User-specified boot memory size is capped at 1/MAX_BOOT_MEM_RATIO (25%) */
+#define MAX_BOOT_MEM_RATIO 4
+
+#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
+
+/* FAD commands */
+#define FADUMP_REGISTER 1
+#define FADUMP_UNREGISTER 2
+#define FADUMP_INVALIDATE 3
+
+/*
+ * Copy the ascii values for first 8 characters from a string into u64
+ * variable at their respective indexes.
+ * e.g.
+ * The string "FADMPINF" will be converted into 0x4641444d50494e46
+ */
+static inline u64 fadump_str_to_u64(const char *str)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < sizeof(val); i++)
+ val = (*str) ? (val << 8) | *str++ : val << 8;
+ return val;
+}
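+
+/*
+ * Byte-by-byte check of the example above: 'F' == 0x46, 'A' == 0x41,
+ * 'D' == 0x44, 'M' == 0x4d, 'P' == 0x50, 'I' == 0x49, 'N' == 0x4e,
+ * 'F' == 0x46, giving 0x4641444d50494e46.
+ */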
+
+#define FADUMP_CPU_UNKNOWN (~((u32)0))
+
+#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPINF")
+
+/* fadump crash info structure */
+struct fadump_crash_info_header {
+ u64 magic_number;
+ u64 elfcorehdr_addr;
+ u32 crashing_cpu;
+ struct pt_regs regs;
+ struct cpumask cpu_mask;
+};
+
+struct fadump_memory_range {
+ u64 base;
+ u64 size;
+};
+
+/* fadump memory ranges info */
+#define RNG_NAME_SZ 16
+struct fadump_mrange_info {
+ char name[RNG_NAME_SZ];
+ struct fadump_memory_range *mem_ranges;
+ u32 mem_ranges_sz;
+ u32 mem_range_cnt;
+ u32 max_mem_ranges;
+ bool is_static;
+};
+
+/* Platform specific callback functions */
+struct fadump_ops;
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ unsigned long reserve_dump_area_start;
+ unsigned long reserve_dump_area_size;
+ /* cmd line option during boot */
+ unsigned long reserve_bootvar;
+
+ unsigned long cpu_state_data_size;
+ u64 cpu_state_dest_vaddr;
+ u32 cpu_state_data_version;
+ u32 cpu_state_entry_size;
+
+ unsigned long hpte_region_size;
+
+ unsigned long boot_memory_size;
+ u64 boot_mem_dest_addr;
+ u64 boot_mem_addr[FADUMP_MAX_MEM_REGS];
+ u64 boot_mem_sz[FADUMP_MAX_MEM_REGS];
+ u64 boot_mem_top;
+ u64 boot_mem_regs_cnt;
+
+ unsigned long fadumphdr_addr;
+ unsigned long cpu_notes_buf_vaddr;
+ unsigned long cpu_notes_buf_size;
+
+ /*
+ * Maximum size supported by firmware to copy from source to
+ * destination address per entry.
+ */
+ u64 max_copy_size;
+ u64 kernel_metadata;
+
+ int ibm_configure_kernel_dump;
+
+ unsigned long fadump_enabled:1;
+ unsigned long fadump_supported:1;
+ unsigned long dump_active:1;
+ unsigned long dump_registered:1;
+ unsigned long nocma:1;
+
+ struct fadump_ops *ops;
+};
+
+struct fadump_ops {
+ u64 (*fadump_init_mem_struct)(struct fw_dump *fadump_conf);
+ u64 (*fadump_get_metadata_size)(void);
+ int (*fadump_setup_metadata)(struct fw_dump *fadump_conf);
+ u64 (*fadump_get_bootmem_min)(void);
+ int (*fadump_register)(struct fw_dump *fadump_conf);
+ int (*fadump_unregister)(struct fw_dump *fadump_conf);
+ int (*fadump_invalidate)(struct fw_dump *fadump_conf);
+ void (*fadump_cleanup)(struct fw_dump *fadump_conf);
+ int (*fadump_process)(struct fw_dump *fadump_conf);
+ void (*fadump_region_show)(struct fw_dump *fadump_conf,
+ struct seq_file *m);
+ void (*fadump_trigger)(struct fadump_crash_info_header *fdh,
+ const char *msg);
+};
+
+/* Helper functions */
+s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus);
+void fadump_free_cpu_notes_buf(void);
+u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
+void __init fadump_update_elfcore_header(char *bufp);
+bool is_fadump_boot_mem_contiguous(void);
+bool is_fadump_reserved_mem_contiguous(void);
+
+#else /* !CONFIG_PRESERVE_FA_DUMP */
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ u64 boot_mem_top;
+ u64 dump_active;
+};
+
+#endif /* CONFIG_PRESERVE_FA_DUMP */
+
+#ifdef CONFIG_PPC_PSERIES
+extern void rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
+#else
+static inline void
+rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+extern void opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
+#else
+static inline void
+opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
+#endif
+
+#endif /* _ASM_POWERPC_FADUMP_INTERNAL_H */
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
new file mode 100644
index 000000000..526a6a647
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Firmware Assisted dump header file.
+ *
+ * Copyright 2011 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_POWERPC_FADUMP_H
+#define _ASM_POWERPC_FADUMP_H
+
+#ifdef CONFIG_FA_DUMP
+
+extern int crashing_cpu;
+
+extern int is_fadump_memory_area(u64 addr, ulong size);
+extern int setup_fadump(void);
+extern int is_fadump_active(void);
+extern int should_fadump_crash(void);
+extern void crash_fadump(struct pt_regs *, const char *);
+extern void fadump_cleanup(void);
+
+#else /* CONFIG_FA_DUMP */
+static inline int is_fadump_active(void) { return 0; }
+static inline int should_fadump_crash(void) { return 0; }
+static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+static inline void fadump_cleanup(void) { }
+#endif /* !CONFIG_FA_DUMP */
+
+#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
+extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
+ int depth, void *data);
+extern int fadump_reserve_mem(void);
+#endif
+#endif /* _ASM_POWERPC_FADUMP_H */
diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h
new file mode 100644
index 000000000..6541ab77c
--- /dev/null
+++ b/arch/powerpc/include/asm/fb.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
new file mode 100644
index 000000000..ac605fc36
--- /dev/null
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
+#define __ASM_POWERPC_FEATURE_FIXUPS_H
+
+#include <asm/asm-const.h>
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than absolute code pointers in order to be
+ * usable with the vdso shared library. There is also an assumption
+ * that values will be negative, that is, the fixup table has to be
+ * located after the code it fixes up.
+ */
+#if defined(CONFIG_PPC64) && !defined(__powerpc64__)
+/* 64-bit kernel, 32-bit code (i.e. vdso32) */
+#define FTR_ENTRY_LONG .8byte
+#define FTR_ENTRY_OFFSET .long 0xffffffff; .long
+#elif defined(CONFIG_PPC64)
+#define FTR_ENTRY_LONG .8byte
+#define FTR_ENTRY_OFFSET .8byte
+#else
+#define FTR_ENTRY_LONG .long
+#define FTR_ENTRY_OFFSET .long
+#endif
+
+#define START_FTR_SECTION(label) label##1:
+
+#define FTR_SECTION_ELSE_NESTED(label) \
+label##2: \
+ .pushsection __ftr_alt_##label,"a"; \
+ .align 2; \
+label##3:
+
+
+#ifndef CONFIG_CC_IS_CLANG
+#define CHECK_ALT_SIZE(else_size, body_size) \
+ .ifgt (else_size) - (body_size); \
+ .error "Feature section else case larger than body"; \
+ .endif;
+#else
+/*
+ * If we use the ifgt syntax above, clang's assembler complains about the
+ * expression being non-absolute when the code appears in an inline assembly
+ * statement.
+ * As a workaround use an .org directive that has no effect if the else case
+ * instructions are smaller than the body, but fails otherwise.
+ */
+#define CHECK_ALT_SIZE(else_size, body_size) \
+ .org . + ((else_size) > (body_size));
+#endif
+
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
+label##4: \
+ .popsection; \
+ .pushsection sect,"a"; \
+ .align 3; \
+label##5: \
+ FTR_ENTRY_LONG msk; \
+ FTR_ENTRY_LONG val; \
+ FTR_ENTRY_OFFSET label##1b-label##5b; \
+ FTR_ENTRY_OFFSET label##2b-label##5b; \
+ FTR_ENTRY_OFFSET label##3b-label##5b; \
+ FTR_ENTRY_OFFSET label##4b-label##5b; \
+ CHECK_ALT_SIZE((label##4b-label##3b), (label##2b-label##1b)); \
+ .popsection;
+
+
+/* CPU feature dependent sections */
+#define BEGIN_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+
+#define END_FTR_SECTION(msk, val) \
+ END_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FTR_SECTION_NESTED_IFSET(msk, label) \
+ END_FTR_SECTION_NESTED((msk), (msk), label)
+
+#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
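+
+/*
+ * A minimal usage sketch (CPU_FTR_ALTIVEC here is just an example feature
+ * bit): the instructions between the markers are patched out at boot
+ * unless the feature is set.
+ *
+ *	BEGIN_FTR_SECTION
+ *		mfspr	r0,SPRN_VRSAVE
+ *	END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ */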
+
+/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FTR_SECTION_END(msk, val) \
+ ALT_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FTR_SECTION_END_IFSET(msk) \
+ ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FTR_SECTION_END_IFCLR(msk) \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val) \
+ END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_NESTED_IFSET(msk, label) \
+ END_MMU_FTR_SECTION_NESTED((msk), (msk), label)
+
+#define END_MMU_FTR_SECTION_NESTED_IFCLR(msk, label) \
+ END_MMU_FTR_SECTION_NESTED((msk), 0, label)
+
+#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+/* Firmware feature dependent sections */
+#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+
+#define END_FW_FTR_SECTION(msk, val) \
+ END_FW_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk))
+#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0)
+
+/* Firmware feature sections with alternatives */
+#define FW_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
+#define FW_FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FW_FTR_SECTION_END(msk, val) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FW_FTR_SECTION_END_IFSET(msk) \
+ ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FW_FTR_SECTION_END_IFCLR(msk) \
+ ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+#ifndef __ASSEMBLY__
+
+#define ASM_FTR_IF(section_if, section_else, msk, val) \
+ stringify_in_c(BEGIN_FTR_SECTION) \
+ section_if "; " \
+ stringify_in_c(FTR_SECTION_ELSE) \
+ section_else "; " \
+ stringify_in_c(ALT_FTR_SECTION_END((msk), (val)))
+
+#define ASM_FTR_IFSET(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_FTR_IFCLR(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), 0)
+
+#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \
+ stringify_in_c(BEGIN_MMU_FTR_SECTION) \
+ section_if "; " \
+ stringify_in_c(MMU_FTR_SECTION_ELSE) \
+ section_else "; " \
+ stringify_in_c(ALT_MMU_FTR_SECTION_END((msk), (val)))
+
+#define ASM_MMU_FTR_IFSET(section_if, section_else, msk) \
+ ASM_MMU_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_MMU_FTR_IFCLR(section_if, section_else, msk) \
+ ASM_MMU_FTR_IF(section_if, section_else, (msk), 0)
+
+#endif /* __ASSEMBLY__ */
+
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label) label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \
+label##2: \
+ .pushsection sect,"a"; \
+ .align 2; \
+label##3: \
+ FTR_ENTRY_OFFSET label##1b-label##3b; \
+ .popsection;
+
+#define STF_ENTRY_BARRIER_FIXUP_SECTION \
+953: \
+ .pushsection __stf_entry_barrier_fixup,"a"; \
+ .align 2; \
+954: \
+ FTR_ENTRY_OFFSET 953b-954b; \
+ .popsection;
+
+#define STF_EXIT_BARRIER_FIXUP_SECTION \
+955: \
+ .pushsection __stf_exit_barrier_fixup,"a"; \
+ .align 2; \
+956: \
+ FTR_ENTRY_OFFSET 955b-956b; \
+ .popsection;
+
+#define UACCESS_FLUSH_FIXUP_SECTION \
+959: \
+ .pushsection __uaccess_flush_fixup,"a"; \
+ .align 2; \
+960: \
+ FTR_ENTRY_OFFSET 959b-960b; \
+ .popsection;
+
+#define ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __scv_entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
+#define RFI_FLUSH_FIXUP_SECTION \
+951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+ .align 2; \
+952: \
+ FTR_ENTRY_OFFSET 951b-952b; \
+ .popsection;
+
+#define NOSPEC_BARRIER_FIXUP_SECTION \
+953: \
+ .pushsection __barrier_nospec_fixup,"a"; \
+ .align 2; \
+954: \
+ FTR_ENTRY_OFFSET 953b-954b; \
+ .popsection;
+
+#define START_BTB_FLUSH_SECTION \
+955: \
+
+#define END_BTB_FLUSH_SECTION \
+956: \
+ .pushsection __btb_flush_fixup,"a"; \
+ .align 2; \
+957: \
+ FTR_ENTRY_OFFSET 955b-957b; \
+ FTR_ENTRY_OFFSET 956b-957b; \
+ .popsection;
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long stf_barrier_fallback;
+extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
+extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+
+void apply_feature_fixups(void);
+void setup_feature_keys(void);
+#endif
+
+#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
new file mode 100644
index 000000000..ed6db13a1
--- /dev/null
+++ b/arch/powerpc/include/asm/firmware.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ */
+#ifndef __ASM_POWERPC_FIRMWARE_H
+#define __ASM_POWERPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#include <asm/asm-const.h>
+
+/* firmware feature bitmask values */
+
+#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
+#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
+#define FW_FEATURE_SPRG0 ASM_CONST(0x0000000000000004)
+#define FW_FEATURE_DABR ASM_CONST(0x0000000000000008)
+#define FW_FEATURE_COPY ASM_CONST(0x0000000000000010)
+#define FW_FEATURE_ASR ASM_CONST(0x0000000000000020)
+#define FW_FEATURE_DEBUG ASM_CONST(0x0000000000000040)
+#define FW_FEATURE_TERM ASM_CONST(0x0000000000000080)
+#define FW_FEATURE_PERF ASM_CONST(0x0000000000000100)
+#define FW_FEATURE_DUMP ASM_CONST(0x0000000000000200)
+#define FW_FEATURE_INTERRUPT ASM_CONST(0x0000000000000400)
+#define FW_FEATURE_MIGRATE ASM_CONST(0x0000000000000800)
+#define FW_FEATURE_PERFMON ASM_CONST(0x0000000000001000)
+#define FW_FEATURE_CRQ ASM_CONST(0x0000000000002000)
+#define FW_FEATURE_VIO ASM_CONST(0x0000000000004000)
+#define FW_FEATURE_RDMA ASM_CONST(0x0000000000008000)
+#define FW_FEATURE_LLAN ASM_CONST(0x0000000000010000)
+#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000000020000)
+#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
+#define FW_FEATURE_PUT_TCE_IND ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
+#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
+#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
+#define FW_FEATURE_HPT_RESIZE ASM_CONST(0x0000000001000000)
+#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
+#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
+#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
+#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+#define FW_FEATURE_FORM1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
+#define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000)
+#define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000)
+#define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000)
+#define FW_FEATURE_STUFF_TCE ASM_CONST(0x0000008000000000)
+#define FW_FEATURE_RPT_INVALIDATE ASM_CONST(0x0000010000000000)
+#define FW_FEATURE_FORM2_AFFINITY ASM_CONST(0x0000020000000000)
+#define FW_FEATURE_ENERGY_SCALE_INFO ASM_CONST(0x0000040000000000)
+#define FW_FEATURE_WATCHDOG ASM_CONST(0x0000080000000000)
+
+#ifndef __ASSEMBLY__
+
+enum {
+#ifdef CONFIG_PPC64
+ FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+ FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+ FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+ FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+ FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+ FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+ FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
+ FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE |
+ FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+ FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_FORM1_AFFINITY | FW_FEATURE_PRRN |
+ FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
+ FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
+ FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR |
+ FW_FEATURE_RPT_INVALIDATE | FW_FEATURE_FORM2_AFFINITY |
+ FW_FEATURE_ENERGY_SCALE_INFO | FW_FEATURE_WATCHDOG,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR,
+ FW_FEATURE_POWERNV_ALWAYS = 0,
+ FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_NATIVE_POSSIBLE = 0,
+ FW_FEATURE_NATIVE_ALWAYS = 0,
+ FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_POWERNV
+ FW_FEATURE_POWERNV_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS |
+#endif
+ 0,
+ FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_POWERNV
+ FW_FEATURE_POWERNV_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS &
+#endif
+ FW_FEATURE_POSSIBLE,
+
+#else /* CONFIG_PPC64 */
+ FW_FEATURE_POSSIBLE = 0,
+ FW_FEATURE_ALWAYS = 0,
+#endif
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long powerpc_firmware_features;
+
+#define firmware_has_feature(feature) \
+ ((FW_FEATURE_ALWAYS & (feature)) || \
+ (FW_FEATURE_POSSIBLE & powerpc_firmware_features & (feature)))
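+
+/*
+ * Illustrative sketch (not part of the upstream header): callers simply
+ * test one feature bit; the helper names below are hypothetical:
+ *
+ *	if (firmware_has_feature(FW_FEATURE_LPAR))
+ *		setup_hypervisor_path();	// hypothetical helper
+ *	else
+ *		setup_bare_metal_path();	// hypothetical helper
+ *
+ * Because FW_FEATURE_ALWAYS and FW_FEATURE_POSSIBLE are compile-time
+ * constants, the compiler can fold the whole test away on configs where
+ * the feature can never, or must always, be present.
+ */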
+
+extern void system_reset_fwnmi(void);
+extern void machine_check_fwnmi(void);
+
+/* This is true if we are using the firmware NMI handler (typically LPAR) */
+extern int fwnmi_active;
+extern int ibm_nmi_interlock_token;
+
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+
+#ifdef CONFIG_PPC_PSERIES
+void pseries_probe_fw_features(void);
+#else
+static inline void pseries_probe_fw_features(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
new file mode 100644
index 000000000..a832aeafe
--- /dev/null
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -0,0 +1,123 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Copyright 2008 Freescale Semiconductor Inc.
+ * Port to powerpc added by Kumar Gala
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/sizes.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_size.h>
+#endif
+
+#ifdef CONFIG_PPC64
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
+#else
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx, phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+ FIX_HOLE,
+#ifdef CONFIG_PPC32
+ /* reserve the top 128K for early debugging purposes */
+ FIX_EARLY_DEBUG_TOP = FIX_HOLE,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+#endif
+#ifdef CONFIG_PPC_8xx
+ /* For IMMR we need an aligned 512K area */
+#define FIX_IMMR_SIZE (512 * 1024 / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
+#ifdef CONFIG_PPC_83xx
+ /* For IMMR we need an aligned 2M area */
+#define FIX_IMMR_SIZE (SZ_2M / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
+ /* FIX_PCIE_MCFG, */
+#endif /* CONFIG_PPC32 */
+ __end_of_permanent_fixed_addresses,
+
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 16
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define FIXMAP_ALIGNED_SIZE (ALIGN(FIXADDR_TOP, PGDIR_SIZE) - \
+ ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE))
+#define FIXMAP_PTE_SIZE (FIXMAP_ALIGNED_SIZE / PGDIR_SIZE * PTE_TABLE_SIZE)
+
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
+#define FIXMAP_PAGE_IO PAGE_KERNEL_NCG
+
+#include <asm-generic/fixmap.h>
+
+static inline void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
+
+ if (__builtin_constant_p(idx))
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ else if (WARN_ON(idx >= __end_of_fixed_addresses))
+ return;
+ if (pgprot_val(flags))
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
+ else
+ unmap_kernel_page(__fix_to_virt(idx));
+}
+
+#define __early_set_fixmap __set_fixmap
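+
+/*
+ * Illustrative sketch (not part of the upstream header): mapping a device
+ * page at a fixed virtual address during early boot. 'idx' stands for one
+ * of the enum fixed_addresses slots above and 'immr_phys' for a physical
+ * address found in the device tree; both are example names:
+ *
+ *	__set_fixmap(idx, immr_phys, PAGE_KERNEL_NCG);
+ *	regs = (void __iomem *)__fix_to_virt(idx);
+ *
+ * Passing a pgprot value of 0 instead unmaps the slot again.
+ */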
+
+#endif /* !__ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
new file mode 100644
index 000000000..f8ce178b4
--- /dev/null
+++ b/arch/powerpc/include/asm/floppy.h
@@ -0,0 +1,214 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_POWERPC_FLOPPY_H
+#define __ASM_POWERPC_FLOPPY_H
+#ifdef __KERNEL__
+
+#include <asm/machdep.h>
+
+#define fd_inb(base, reg) inb_p((base) + (reg))
+#define fd_outb(value, base, reg) outb_p(value, (base) + (reg))
+
+#define fd_enable_dma() enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() fd_ops->_disable_dma(FLOPPY_DMA)
+#define fd_free_dma() fd_ops->_free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
+#define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+
+#include <linux/pci.h>
+#include <asm/ppc-pci.h> /* for isa_bridge_pcidev */
+
+#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io)
+
+static int fd_request_dma(void);
+
+struct fd_dma_ops {
+ void (*_disable_dma)(unsigned int dmanr);
+ void (*_free_dma)(unsigned int dmanr);
+ int (*_get_dma_residue)(unsigned int dummy);
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+};
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_vdma;
+static struct fd_dma_ops *fd_ops;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id)
+{
+ unsigned char st;
+ int lcount;
+ char *lptr;
+
+ if (!doing_vdma)
+ return floppy_interrupt(irq, dev_id);
+
+
+ st = 1;
+ for (lcount=virtual_dma_count, lptr=virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st = inb(virtual_dma_port + FD_STATUS);
+ st &= STATUS_DMA | STATUS_READY;
+ if (st != (STATUS_DMA | STATUS_READY))
+ break;
+ if (virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port + FD_DATA);
+ else
+ *lptr = inb_p(virtual_dma_port + FD_DATA);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port + FD_STATUS);
+
+ if (st == STATUS_DMA)
+ return IRQ_HANDLED;
+ if (!(st & STATUS_DMA)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+ doing_vdma = 0;
+ floppy_interrupt(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ return IRQ_HANDLED;
+}
+
+static void vdma_disable_dma(unsigned int dummy)
+{
+ doing_vdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ if (can_use_virtual_dma)
+ return request_irq(FLOPPY_IRQ, floppy_hardint,
+ 0, "floppy", NULL);
+ else
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ 0, "floppy", NULL);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_vdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ static unsigned long prev_size;
+ static dma_addr_t bus_addr = 0;
+ static char *prev_addr;
+ static int prev_dir;
+ int dir;
+
+ doing_vdma = 0;
+ dir = (mode == DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (bus_addr
+ && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
+ /* different from last time -- unmap prev */
+ dma_unmap_single(&isa_bridge_pcidev->dev, bus_addr, prev_size,
+ prev_dir);
+ bus_addr = 0;
+ }
+
+ if (!bus_addr) /* need to map it */
+ bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
+ dir);
+
+ /* remember this one as prev */
+ prev_addr = addr;
+ prev_size = size;
+ prev_dir = dir;
+
+ fd_clear_dma_ff();
+ fd_set_dma_mode(mode);
+ set_dma_addr(FLOPPY_DMA, bus_addr);
+ fd_set_dma_count(size);
+ virtual_dma_port = io;
+ fd_enable_dma();
+
+ return 0;
+}
+
+static struct fd_dma_ops real_dma_ops =
+{
+ ._disable_dma = disable_dma,
+ ._free_dma = free_dma,
+ ._get_dma_residue = get_dma_residue,
+ ._dma_setup = hard_dma_setup
+};
+
+static struct fd_dma_ops virt_dma_ops =
+{
+ ._disable_dma = vdma_disable_dma,
+ ._free_dma = vdma_nop,
+ ._get_dma_residue = vdma_get_dma_residue,
+ ._dma_setup = vdma_dma_setup
+};
+
+static int fd_request_dma(void)
+{
+ if (can_use_virtual_dma & 1) {
+ fd_ops = &virt_dma_ops;
+ return 0;
+ }
+ else {
+ fd_ops = &real_dma_ops;
+ return request_dma(FLOPPY_DMA, "floppy");
+ }
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Again, the CMOS information is not available
+ */
+#define FLOPPY0_TYPE 6
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 2 /* Don't change this! */
+#define N_DRIVE 8
+
+/*
+ * The PowerPC has no problems with floppy DMA crossing 64k borders.
+ */
+#define CROSS_64KB(a,s) (0)
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
new file mode 100644
index 000000000..8def56ec0
--- /dev/null
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -0,0 +1,49 @@
+/*
+ * Platform information definitions.
+ *
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_PD_H
+#define FS_PD_H
+#include <sysdev/fsl_soc.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+
+#if defined(CONFIG_8260)
+#include <asm/mpc8260.h>
+#endif
+
+#define cpm2_map(member) (&cpm2_immr->member)
+#define cpm2_map_size(member, size) (&cpm2_immr->member)
+#define cpm2_unmap(addr) do {} while(0)
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#include <asm/8xx_immap.h>
+
+extern immap_t __iomem *mpc8xx_immr;
+
+#define immr_map(member) (&mpc8xx_immr->member)
+#define immr_map_size(member, size) (&mpc8xx_immr->member)
+#define immr_unmap(addr) do {} while (0)
+#endif
+
+static inline int uart_baudrate(void)
+{
+ return get_baudrate();
+}
+
+static inline int uart_clock(void)
+{
+ return ppc_proc_freq;
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h
new file mode 100644
index 000000000..6ff687650
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_gtm.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale General-purpose Timers Module
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#ifndef __ASM_FSL_GTM_H
+#define __ASM_FSL_GTM_H
+
+#include <linux/types.h>
+
+struct gtm;
+
+struct gtm_timer {
+ unsigned int irq;
+
+ struct gtm *gtm;
+ bool requested;
+ u8 __iomem *gtcfr;
+ __be16 __iomem *gtmdr;
+ __be16 __iomem *gtpsr;
+ __be16 __iomem *gtcnr;
+ __be16 __iomem *gtrfr;
+ __be16 __iomem *gtevr;
+};
+
+extern struct gtm_timer *gtm_get_timer16(void);
+extern struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm,
+ unsigned int timer);
+extern void gtm_put_timer16(struct gtm_timer *tmr);
+extern int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec,
+ bool reload);
+extern int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec,
+ bool reload);
+extern void gtm_stop_timer16(struct gtm_timer *tmr);
+extern void gtm_ack_timer16(struct gtm_timer *tmr, u16 events);
+
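+/*
+ * Illustrative sketch (not part of the upstream header): the usual
+ * request/program/ack lifecycle of a GTM timer; 'my_handler' is a
+ * hypothetical interrupt handler:
+ *
+ *	struct gtm_timer *t = gtm_get_timer16();
+ *
+ *	if (!IS_ERR_OR_NULL(t)) {
+ *		request_irq(t->irq, my_handler, 0, "my-gtm", t);
+ *		gtm_set_timer16(t, 1000, true);	// tick every 1000 us
+ *	}
+ *	// in my_handler: gtm_ack_timer16(t, events);
+ *	gtm_stop_timer16(t);
+ *	gtm_put_timer16(t);
+ */
+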
+#endif /* __ASM_FSL_GTM_H */
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 000000000..b889d1354
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
+/*
+ * Freescale hypervisor call interface
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_HCALLS_H
+#define _FSL_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+#include <asm/epapr_hcalls.h>
+
+#define FH_API_VERSION 1
+
+#define FH_ERR_GET_INFO 1
+#define FH_PARTITION_GET_DTPROP 2
+#define FH_PARTITION_SET_DTPROP 3
+#define FH_PARTITION_RESTART 4
+#define FH_PARTITION_GET_STATUS 5
+#define FH_PARTITION_START 6
+#define FH_PARTITION_STOP 7
+#define FH_PARTITION_MEMCPY 8
+#define FH_DMA_ENABLE 9
+#define FH_DMA_DISABLE 10
+#define FH_SEND_NMI 11
+#define FH_VMPIC_GET_MSIR 12
+#define FH_SYSTEM_RESET 13
+#define FH_GET_CORE_STATE 14
+#define FH_ENTER_NAP 15
+#define FH_EXIT_NAP 16
+#define FH_CLAIM_DEVICE 17
+#define FH_PARTITION_STOP_DMA 18
+
+/* vendor ID: Freescale Semiconductor */
+#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * fh_send_nmi - send NMI to virtual cpu(s).
+ * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
+ *
+ * Returns 0 for success, or EINVAL for invalid vcpu_mask.
+ */
+static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
+ r3 = vcpu_mask;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
+#define FH_DTPROP_MAX_PATHLEN 4096
+#define FH_DTPROP_MAX_PROPLEN 32768
+
+/**
+ * fh_partition_get_dtprop - get a property from a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value buffer
+ * @propvalue_len: length of buffer on entry, length of property on return
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_get_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t *propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = *propvalue_len;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ *propvalue_len = r4;
+ return r3;
+}
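+
+/*
+ * Illustrative sketch (not part of the upstream header): reading one
+ * property from another partition's device tree. 'handle' is an example
+ * partition handle; buffers are passed by guest physical address, hence
+ * the virt_to_phys() conversions:
+ *
+ *	char path[] = "/cpus/cpu@0";
+ *	char prop[] = "clock-frequency";
+ *	u32 val, len = sizeof(val);
+ *
+ *	if (!fh_partition_get_dtprop(handle, virt_to_phys(path),
+ *				     virt_to_phys(prop), virt_to_phys(&val),
+ *				     &len))
+ *		pr_info("clock-frequency: %u (len %u)\n", val, len);
+ *	// device tree property values are typically big-endian, so a
+ *	// be32_to_cpu() conversion may be needed on the result
+ */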
+
+/**
+ * fh_partition_set_dtprop - set a property in a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value
+ * @propvalue_len: length of property
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_set_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = propvalue_len;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_restart - reboot the current partition
+ * @partition: partition ID
+ *
+ * Returns an error code if reboot failed. Does not return if it succeeds.
+ */
+static inline unsigned int fh_partition_restart(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#define FH_PARTITION_STOPPED 0
+#define FH_PARTITION_RUNNING 1
+#define FH_PARTITION_STARTING 2
+#define FH_PARTITION_STOPPING 3
+#define FH_PARTITION_PAUSING 4
+#define FH_PARTITION_PAUSED 5
+#define FH_PARTITION_RESUMING 6
+
+/**
+ * fh_partition_get_status - gets the status of a partition
+ * @partition: partition ID
+ * @status: returned status code
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_get_status(unsigned int partition,
+ unsigned int *status)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *status = r4;
+
+ return r3;
+}
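+
+/*
+ * Illustrative sketch (not part of the upstream header): waiting for a
+ * partition to come to a stop before acting on it:
+ *
+ *	unsigned int status;
+ *
+ *	do {
+ *		if (fh_partition_get_status(part, &status))
+ *			return -EIO;
+ *	} while (status != FH_PARTITION_STOPPED);
+ */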
+
+/**
+ * fh_partition_start - boots and starts execution of the specified partition
+ * @partition: partition ID
+ * @entry_point: guest physical address to start execution
+ *
+ * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
+ * time, guest physical addresses are the same as guest virtual addresses.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_start(unsigned int partition,
+ uint32_t entry_point, int load)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
+ r3 = partition;
+ r4 = entry_point;
+ r5 = load;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop - stops another partition
+ * @partition: partition ID
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
+ * @source: guest physical address to copy from
+ * @target: guest physical address to copy to
+ * @size: number of bytes to copy
+ * @reserved: reserved, must be zero
+ *
+ * The scatter/gather list for fh_partition_memcpy() is an array of these
+ * structures. The array must be guest physically contiguous.
+ *
+ * This structure must be aligned on a 32-byte boundary, so that no single
+ * structure can span two pages.
+ */
+struct fh_sg_list {
+ uint64_t source; /**< guest physical address to copy from */
+ uint64_t target; /**< guest physical address to copy to */
+ uint64_t size; /**< number of bytes to copy */
+ uint64_t reserved; /**< reserved, must be zero */
+} __attribute__ ((aligned(32)));
+
+/**
+ * fh_partition_memcpy - copies data from one guest to another
+ * @source: the ID of the partition to copy from
+ * @target: the ID of the partition to copy to
+ * @sg_list: guest physical address of an array of &fh_sg_list structures
+ * @count: the number of entries in @sg_list
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_memcpy(unsigned int source,
+ unsigned int target, phys_addr_t sg_list, unsigned int count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
+ r3 = source;
+ r4 = target;
+ r5 = (uint32_t) sg_list;
+
+#ifdef CONFIG_PHYS_64BIT
+ r6 = sg_list >> 32;
+#else
+ r6 = 0;
+#endif
+ r7 = count;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ return r3;
+}
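+
+/*
+ * Illustrative sketch (not part of the upstream header): copying one
+ * buffer between partitions with a single-entry S/G list; the *_gpa and
+ * *_part variables are example inputs:
+ *
+ *	static struct fh_sg_list sg;	// 32-byte aligned by its type
+ *	unsigned int ret;
+ *
+ *	sg.source = src_gpa;
+ *	sg.target = dst_gpa;
+ *	sg.size = len;
+ *	sg.reserved = 0;
+ *	ret = fh_partition_memcpy(src_part, dst_part, virt_to_phys(&sg), 1);
+ */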
+
+/**
+ * fh_dma_enable - enable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to enable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_enable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
+ r3 = liodn;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_dma_disable - disable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to disable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_disable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
+ r3 = liodn;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_vmpic_get_msir - returns the MPIC-MSI register value
+ * @interrupt: the interrupt number
+ * @msir_val: returned MPIC-MSI register value
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
+ unsigned int *msir_val)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *msir_val = r4;
+
+ return r3;
+}
+
+/**
+ * fh_system_reset - reset the system
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_system_reset(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_err_get_info - get platform error information
+ * @queue: 0 for the guest error event queue, 1 for the global error
+ *         event queue
+ * @bufsize: on entry, the size of the data buffer; on return, the size
+ *           of the returned error data
+ * @addr_hi: upper 32 bits of the guest physical address of the data buffer
+ * @addr_lo: lower 32 bits of the guest physical address of the data buffer
+ * @peek: non-zero to read the event without removing it from the queue
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
+ uint32_t addr_hi, uint32_t addr_lo, int peek)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
+ r3 = queue;
+ r4 = *bufsize;
+ r5 = addr_hi;
+ r6 = addr_lo;
+ r7 = peek;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
+ "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ *bufsize = r4;
+
+ return r3;
+}
+
+
+#define FH_VCPU_RUN 0
+#define FH_VCPU_IDLE 1
+#define FH_VCPU_NAP 2
+
+/**
+ * fh_get_core_state - get the state of a vcpu
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ * @state: the current state of the vcpu, see FH_VCPU_*
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_get_core_state(unsigned int handle,
+ unsigned int vcpu, unsigned int *state)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *state = r4;
+ return r3;
+}
+
+/**
+ * fh_enter_nap - enter nap on a vcpu
+ *
+ * Note that though the API supports entering nap on a vcpu other
+ * than the caller, this may not be implemented and may return EINVAL.
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_exit_nap - exit nap on a vcpu
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_claim_device - claim a "claimable" shared device
+ * @handle: fsl,hv-device-handle of node to claim
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_claim_device(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop_dma - run deferred DMA disabling on a partition's
+ * private devices
+ *
+ * This applies to devices which a partition owns either privately,
+ * or which are claimable and still actively owned by that partition,
+ * and which do not have the no-dma-disable property.
+ *
+ * @handle: partition (must be stopped) whose DMA is to be disabled
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop_dma(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+#endif
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
new file mode 100644
index 000000000..c4af5ee71
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Freescale Local Bus Controller
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ */
+
+#ifndef __ASM_FSL_LBC_H
+#define __ASM_FSL_LBC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+
+struct fsl_lbc_bank {
+ __be32 br; /**< Base Register */
+#define BR_BA 0xFFFF8000
+#define BR_BA_SHIFT 15
+#define BR_PS 0x00001800
+#define BR_PS_SHIFT 11
+#define BR_PS_8 0x00000800 /* Port Size 8 bit */
+#define BR_PS_16 0x00001000 /* Port Size 16 bit */
+#define BR_PS_32 0x00001800 /* Port Size 32 bit */
+#define BR_DECC 0x00000600
+#define BR_DECC_SHIFT 9
+#define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */
+#define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */
+#define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */
+#define BR_WP 0x00000100
+#define BR_WP_SHIFT 8
+#define BR_MSEL 0x000000E0
+#define BR_MSEL_SHIFT 5
+#define BR_MS_GPCM 0x00000000 /* GPCM */
+#define BR_MS_FCM 0x00000020 /* FCM */
+#define BR_MS_SDRAM 0x00000060 /* SDRAM */
+#define BR_MS_UPMA 0x00000080 /* UPMA */
+#define BR_MS_UPMB 0x000000A0 /* UPMB */
+#define BR_MS_UPMC 0x000000C0 /* UPMC */
+#define BR_V 0x00000001
+#define BR_V_SHIFT 0
+#define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
+
+	__be32 or; /**< Options Register */
+#define OR0 0x5004
+#define OR1 0x500C
+#define OR2 0x5014
+#define OR3 0x501C
+#define OR4 0x5024
+#define OR5 0x502C
+#define OR6 0x5034
+#define OR7 0x503C
+
+#define OR_FCM_AM 0xFFFF8000
+#define OR_FCM_AM_SHIFT 15
+#define OR_FCM_BCTLD 0x00001000
+#define OR_FCM_BCTLD_SHIFT 12
+#define OR_FCM_PGS 0x00000400
+#define OR_FCM_PGS_SHIFT 10
+#define OR_FCM_CSCT 0x00000200
+#define OR_FCM_CSCT_SHIFT 9
+#define OR_FCM_CST 0x00000100
+#define OR_FCM_CST_SHIFT 8
+#define OR_FCM_CHT 0x00000080
+#define OR_FCM_CHT_SHIFT 7
+#define OR_FCM_SCY 0x00000070
+#define OR_FCM_SCY_SHIFT 4
+#define OR_FCM_SCY_1 0x00000010
+#define OR_FCM_SCY_2 0x00000020
+#define OR_FCM_SCY_3 0x00000030
+#define OR_FCM_SCY_4 0x00000040
+#define OR_FCM_SCY_5 0x00000050
+#define OR_FCM_SCY_6 0x00000060
+#define OR_FCM_SCY_7 0x00000070
+#define OR_FCM_RST 0x00000008
+#define OR_FCM_RST_SHIFT 3
+#define OR_FCM_TRLX 0x00000004
+#define OR_FCM_TRLX_SHIFT 2
+#define OR_FCM_EHTR 0x00000002
+#define OR_FCM_EHTR_SHIFT 1
+
+#define OR_GPCM_AM 0xFFFF8000
+#define OR_GPCM_AM_SHIFT 15
+};
+
+struct fsl_lbc_regs {
+ struct fsl_lbc_bank bank[12];
+ u8 res0[0x8];
+ __be32 mar; /**< UPM Address Register */
+ u8 res1[0x4];
+ __be32 mamr; /**< UPMA Mode Register */
+#define MxMR_OP_NO (0 << 28) /**< normal operation */
+#define MxMR_OP_WA (1 << 28) /**< write array */
+#define MxMR_OP_RA (2 << 28) /**< read array */
+#define MxMR_OP_RP (3 << 28) /**< run pattern */
+#define MxMR_MAD 0x3f /**< machine address */
+ __be32 mbmr; /**< UPMB Mode Register */
+ __be32 mcmr; /**< UPMC Mode Register */
+ u8 res2[0x8];
+ __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */
+ __be32 mdr; /**< UPM Data Register */
+ u8 res3[0x4];
+ __be32 lsor; /**< Special Operation Initiation Register */
+ __be32 lsdmr; /**< SDRAM Mode Register */
+ u8 res4[0x8];
+ __be32 lurt; /**< UPM Refresh Timer */
+ __be32 lsrt; /**< SDRAM Refresh Timer */
+ u8 res5[0x8];
+ __be32 ltesr; /**< Transfer Error Status Register */
+#define LTESR_BM 0x80000000
+#define LTESR_FCT 0x40000000
+#define LTESR_PAR 0x20000000
+#define LTESR_WP 0x04000000
+#define LTESR_ATMW 0x00800000
+#define LTESR_ATMR 0x00400000
+#define LTESR_CS 0x00080000
+#define LTESR_UPM 0x00000002
+#define LTESR_CC 0x00000001
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+#define LTESR_MASK (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \
+ | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \
+ | LTESR_CC)
+#define LTESR_CLEAR 0xFFFFFFFF
+#define LTECCR_CLEAR 0xFFFFFFFF
+#define LTESR_STATUS LTESR_MASK
+#define LTEIR_ENABLE LTESR_MASK
+#define LTEDR_ENABLE 0x00000000
+ __be32 ltedr; /**< Transfer Error Disable Register */
+ __be32 lteir; /**< Transfer Error Interrupt Register */
+ __be32 lteatr; /**< Transfer Error Attributes Register */
+ __be32 ltear; /**< Transfer Error Address Register */
+ __be32 lteccr; /**< Transfer Error ECC Register */
+ u8 res6[0x8];
+ __be32 lbcr; /**< Configuration Register */
+#define LBCR_LDIS 0x80000000
+#define LBCR_LDIS_SHIFT 31
+#define LBCR_BCTLC 0x00C00000
+#define LBCR_BCTLC_SHIFT 22
+#define LBCR_AHD 0x00200000
+#define LBCR_LPBSE 0x00020000
+#define LBCR_LPBSE_SHIFT 17
+#define LBCR_EPAR 0x00010000
+#define LBCR_EPAR_SHIFT 16
+#define LBCR_BMT 0x0000FF00
+#define LBCR_BMT_SHIFT 8
+#define LBCR_BMTPS 0x0000000F
+#define LBCR_BMTPS_SHIFT 0
+#define LBCR_INIT 0x00040000
+ __be32 lcrr; /**< Clock Ratio Register */
+#define LCRR_DBYP 0x80000000
+#define LCRR_DBYP_SHIFT 31
+#define LCRR_BUFCMDC 0x30000000
+#define LCRR_BUFCMDC_SHIFT 28
+#define LCRR_ECL 0x03000000
+#define LCRR_ECL_SHIFT 24
+#define LCRR_EADC 0x00030000
+#define LCRR_EADC_SHIFT 16
+#define LCRR_CLKDIV 0x0000000F
+#define LCRR_CLKDIV_SHIFT 0
+ u8 res7[0x8];
+ __be32 fmr; /**< Flash Mode Register */
+#define FMR_CWTO 0x0000F000
+#define FMR_CWTO_SHIFT 12
+#define FMR_BOOT 0x00000800
+#define FMR_ECCM 0x00000100
+#define FMR_AL 0x00000030
+#define FMR_AL_SHIFT 4
+#define FMR_OP 0x00000003
+#define FMR_OP_SHIFT 0
+ __be32 fir; /**< Flash Instruction Register */
+#define FIR_OP0 0xF0000000
+#define FIR_OP0_SHIFT 28
+#define FIR_OP1 0x0F000000
+#define FIR_OP1_SHIFT 24
+#define FIR_OP2 0x00F00000
+#define FIR_OP2_SHIFT 20
+#define FIR_OP3 0x000F0000
+#define FIR_OP3_SHIFT 16
+#define FIR_OP4 0x0000F000
+#define FIR_OP4_SHIFT 12
+#define FIR_OP5 0x00000F00
+#define FIR_OP5_SHIFT 8
+#define FIR_OP6 0x000000F0
+#define FIR_OP6_SHIFT 4
+#define FIR_OP7 0x0000000F
+#define FIR_OP7_SHIFT 0
+#define FIR_OP_NOP 0x0 /* No operation and end of sequence */
+#define FIR_OP_CA 0x1 /* Issue current column address */
+#define FIR_OP_PA 0x2 /* Issue current block+page address */
+#define FIR_OP_UA 0x3 /* Issue user defined address */
+#define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */
+#define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */
+#define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */
+#define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */
+#define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */
+#define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */
+#define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */
+#define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */
+#define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */
+#define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */
+#define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */
+#define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */
+ __be32 fcr; /**< Flash Command Register */
+#define FCR_CMD0 0xFF000000
+#define FCR_CMD0_SHIFT 24
+#define FCR_CMD1 0x00FF0000
+#define FCR_CMD1_SHIFT 16
+#define FCR_CMD2 0x0000FF00
+#define FCR_CMD2_SHIFT 8
+#define FCR_CMD3 0x000000FF
+#define FCR_CMD3_SHIFT 0
+ __be32 fbar; /**< Flash Block Address Register */
+#define FBAR_BLK 0x00FFFFFF
+ __be32 fpar; /**< Flash Page Address Register */
+#define FPAR_SP_PI 0x00007C00
+#define FPAR_SP_PI_SHIFT 10
+#define FPAR_SP_MS 0x00000200
+#define FPAR_SP_CI 0x000001FF
+#define FPAR_SP_CI_SHIFT 0
+#define FPAR_LP_PI 0x0003F000
+#define FPAR_LP_PI_SHIFT 12
+#define FPAR_LP_MS 0x00000800
+#define FPAR_LP_CI 0x000007FF
+#define FPAR_LP_CI_SHIFT 0
+ __be32 fbcr; /**< Flash Byte Count Register */
+#define FBCR_BC 0x00000FFF
+};
+
+/*
+ * FSL UPM routines
+ */
+struct fsl_upm {
+ __be32 __iomem *mxmr;
+ int width;
+};
+
+extern u32 fsl_lbc_addr(phys_addr_t addr_base);
+extern int fsl_lbc_find(phys_addr_t addr_base);
+extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm);
+
+/**
+ * fsl_upm_start_pattern - start UPM patterns execution
+ * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
+ * @pat_offset: UPM pattern offset for the command to be executed
+ *
+ * This routine programs the UPM so that the next memory access that hits a
+ * UPM-assigned bank will trigger pattern execution, starting at pat_offset.
+ */
+static inline void fsl_upm_start_pattern(struct fsl_upm *upm, u8 pat_offset)
+{
+ clrsetbits_be32(upm->mxmr, MxMR_MAD, MxMR_OP_RP | pat_offset);
+}
+
+/**
+ * fsl_upm_end_pattern - end UPM patterns execution
+ * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
+ *
+ * This routine reverts UPM to normal operation mode.
+ */
+static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
+{
+ clrbits32(upm->mxmr, MxMR_OP_RP);
+
+ while (in_be32(upm->mxmr) & MxMR_OP_RP)
+ cpu_relax();
+}
+
+/* Overview of the FSL LBC controller */
+
+struct fsl_lbc_ctrl {
+ /* device info */
+ struct device *dev;
+ struct fsl_lbc_regs __iomem *regs;
+ int irq[2];
+ wait_queue_head_t irq_wait;
+ spinlock_t lock;
+ void *nand;
+
+ /* status read from LTESR by irq handler */
+ unsigned int irq_status;
+
+#ifdef CONFIG_SUSPEND
+	/* save regs when the system goes into deep sleep */
+ struct fsl_lbc_regs *saved_regs;
+#endif
+};
+
+extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
+ u32 mar);
+extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
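+
+/*
+ * Illustrative sketch (not part of the upstream header): issuing one UPM
+ * command sequence to a chip mapped at io_base. The pattern offset and MAR
+ * value are example inputs from board-specific UPM tables, and
+ * fsl_upm_find() is assumed to return 0 on success:
+ *
+ *	struct fsl_upm upm;
+ *
+ *	if (!fsl_upm_find(chip_phys_base, &upm)) {
+ *		fsl_upm_start_pattern(&upm, pat_offset);
+ *		fsl_upm_run_pattern(&upm, io_base, mar);
+ *		fsl_upm_end_pattern(&upm);
+ *	}
+ */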
+
+#endif /* __ASM_FSL_LBC_H */
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644
index 000000000..c0fbadb70
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __FSL_PAMU_STASH_H
+#define __FSL_PAMU_STASH_H
+
+struct iommu_domain;
+
+/* cache stash targets */
+enum pamu_stash_target {
+ PAMU_ATTR_CACHE_L1 = 1,
+ PAMU_ATTR_CACHE_L2,
+ PAMU_ATTR_CACHE_L3,
+};
+
+int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu);
+
+#endif /* __FSL_PAMU_STASH_H */
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 000000000..61a4c9773
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+
+#define E500_PM_PH10 1
+#define E500_PM_PH15 2
+#define E500_PM_PH20 3
+#define E500_PM_PH30 4
+#define E500_PM_DOZE E500_PM_PH10
+#define E500_PM_NAP E500_PM_PH15
+
+#define PLAT_PM_SLEEP 20
+#define PLAT_PM_LPM20 30
+
+#define FSL_PM_SLEEP (1 << 0)
+#define FSL_PM_DEEP_SLEEP (1 << 1)
+
+struct fsl_pm_ops {
+ /* mask pending interrupts to the RCPM from MPIC */
+ void (*irq_mask)(int cpu);
+
+ /* unmask pending interrupts to the RCPM from MPIC */
+ void (*irq_unmask)(int cpu);
+ void (*cpu_enter_state)(int cpu, int state);
+ void (*cpu_exit_state)(int cpu, int state);
+ void (*cpu_up_prepare)(int cpu);
+ void (*cpu_die)(int cpu);
+ int (*plat_enter_sleep)(void);
+ void (*freeze_time_base)(bool freeze);
+
+ /* keep the power of IP blocks during sleep/deep sleep */
+ void (*set_ip_power)(bool enable, u32 mask);
+
+ /* get platform supported power management modes */
+ unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+
+int __init fsl_rcpm_init(void);
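+
+/*
+ * Illustrative sketch (not part of the upstream header): a platform idle
+ * path would drive a core through a low-power state via the ops table,
+ * guarding against platforms that never registered one:
+ *
+ *	if (qoriq_pm_ops) {
+ *		qoriq_pm_ops->irq_mask(cpu);
+ *		qoriq_pm_ops->cpu_enter_state(cpu, E500_PM_PH15);
+ *		// ... core naps until a wakeup event ...
+ *		qoriq_pm_ops->cpu_exit_state(cpu, E500_PM_PH15);
+ *		qoriq_pm_ops->irq_unmask(cpu);
+ *	}
+ */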
+
+#endif /* __PPC_FSL_PM_H */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
new file mode 100644
index 000000000..e3d1f377b
--- /dev/null
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_FTRACE
+#define _ASM_POWERPC_FTRACE
+
+#include <asm/types.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /* relocation of mcount call site is the same as the address */
+ return addr;
+}
+
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp);
+
+struct dyn_arch_ftrace {
+ struct module *mod;
+};
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+
+static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ /* We clear regs.msr in ftrace_call */
+ return fregs->regs.msr ? &fregs->regs : NULL;
+}
+
+static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
+ unsigned long ip)
+{
+ regs_set_return_ip(&fregs->regs, ip);
+}
+
+struct ftrace_ops;
+
+#define ftrace_graph_func ftrace_graph_func
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_FTRACE_SYSCALLS
+/*
+ * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
+ * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
+ * those.
+ */
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+ return !strcmp(sym, name) ||
+ (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
+ (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
+ (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+ (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
+}
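+
+/*
+ * Illustrative pairs (not part of the upstream header): the match above
+ * makes, e.g., "ppc_fork" match "sys_fork" and "ppc64_personality" match
+ * "sys_personality", since name + 4 skips the "sys_" prefix of the
+ * generic name.
+ */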
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
+#include <asm/paca.h>
+
+static inline void this_cpu_disable_ftrace(void)
+{
+ get_paca()->ftrace_enabled = 0;
+}
+
+static inline void this_cpu_enable_ftrace(void)
+{
+ get_paca()->ftrace_enabled = 1;
+}
+
+/* Disable ftrace on this CPU if possible (may not be implemented) */
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
+{
+ get_paca()->ftrace_enabled = ftrace_enabled;
+}
+
+static inline u8 this_cpu_get_ftrace_enabled(void)
+{
+ return get_paca()->ftrace_enabled;
+}
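+
+/*
+ * Illustrative sketch (not part of the upstream header): code that must
+ * not recurse into the tracer brackets itself like this, restoring the
+ * previous state afterwards:
+ *
+ *	u8 saved = this_cpu_get_ftrace_enabled();
+ *
+ *	this_cpu_set_ftrace_enabled(0);
+ *	// ... section that must not be traced ...
+ *	this_cpu_set_ftrace_enabled(saved);
+ */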
+
+void ftrace_free_init_tramp(void);
+#else /* CONFIG_PPC64 */
+static inline void this_cpu_disable_ftrace(void) { }
+static inline void this_cpu_enable_ftrace(void) { }
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
+static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
+static inline void ftrace_free_init_tramp(void) { }
+#endif /* CONFIG_PPC64 */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_FTRACE */
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
new file mode 100644
index 000000000..b3001f8b2
--- /dev/null
+++ b/arch/powerpc/include/asm/futex.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+#include <asm/synch.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile ( \
+ PPC_ATOMIC_ENTRY_BARRIER \
+"1: lwarx %0,0,%2\n" \
+ insn \
+"2: stwcx. %1,0,%2\n" \
+ "bne- 1b\n" \
+ PPC_ATOMIC_EXIT_BARRIER \
+ "li %1,0\n" \
+"3: .section .fixup,\"ax\"\n" \
+"4: li %1,%3\n" \
+ "b 3b\n" \
+ ".previous\n" \
+ EX_TABLE(1b, 4b) \
+ EX_TABLE(2b, 4b) \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ : "cr0", "memory")
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ if (!user_access_begin(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ user_access_end();
+
+ *oval = oldval;
+
+ return ret;
+}
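+
+/*
+ * Illustrative sketch (not part of the upstream header): the generic futex
+ * code invokes this as, for example, an atomic add on a user word:
+ *
+ *	int oldval;
+ *	int ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);
+ *	// on success, ret == 0 and oldval holds the pre-add value
+ */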
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 prev;
+
+ if (!user_access_begin(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
+ cmpw 0,%1,%4\n\
+ bne- 3f\n"
+"2: stwcx. %5,0,%3\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+"3: .section .fixup,\"ax\"\n\
+4: li %0,%6\n\
+ b 3b\n\
+ .previous\n"
+ EX_TABLE(1b, 4b)
+ EX_TABLE(2b, 4b)
+ : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+ : "cc", "memory");
+
+ user_access_end();
+
+ *uval = prev;
+
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/arch/powerpc/include/asm/grackle.h b/arch/powerpc/include/asm/grackle.h
new file mode 100644
index 000000000..7376e3fa1
--- /dev/null
+++ b/arch/powerpc/include/asm/grackle.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_GRACKLE_H
+#define _ASM_POWERPC_GRACKLE_H
+#ifdef __KERNEL__
+/*
+ * Functions for setting up and using a MPC106 northbridge
+ */
+
+#include <asm/pci-bridge.h>
+
+extern void setup_grackle(struct pci_controller *hose);
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_GRACKLE_H */
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
new file mode 100644
index 000000000..f133b5930
--- /dev/null
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int timer_irqs_event;
+ unsigned int broadcast_irqs_event;
+ unsigned int timer_irqs_others;
+ unsigned int pmu_irqs;
+ unsigned int mce_exceptions;
+ unsigned int spurious_irqs;
+ unsigned int sreset_irqs;
+#ifdef CONFIG_PPC_WATCHDOG
+ unsigned int soft_nmi_irqs;
+#endif
+#ifdef CONFIG_PPC_DOORBELL
+ unsigned int doorbell_irqs;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu arch_irq_stat_cpu
+
+#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
new file mode 100644
index 000000000..d73153b02
--- /dev/null
+++ b/arch/powerpc/include/asm/head-64.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HEAD_64_H
+#define _ASM_POWERPC_HEAD_64_H
+
+#include <asm/cache.h>
+
+#ifdef __ASSEMBLY__
+/*
+ * We can't do CPP stringification and concatenation directly into the section
+ * name for some reason, so these macros can do it for us.
+ */
+.macro define_ftsec name
+ .section ".head.text.\name\()","ax",@progbits
+.endm
+.macro define_data_ftsec name
+ .section ".head.data.\name\()","a",@progbits
+.endm
+.macro use_ftsec name
+ .section ".head.text.\name\()","ax",@progbits
+.endm
+
+/*
+ * Fixed (location) sections are used by opening fixed sections and emitting
+ * fixed section entries into them before closing them. Multiple fixed sections
+ * can be open at any time.
+ *
+ * Each fixed section created in a .S file must have corresponding linkage
+ * directives, including its location, added to arch/powerpc/kernel/vmlinux.lds.S.
+ *
+ * For each fixed section, code is generated into it in the order in which it
+ * appears in the source. Fixed section entries can be placed at a fixed
+ * location within the section using _LOCATION postfix variants. These must
+ * be ordered according to their relative placements within the section.
+ *
+ * OPEN_FIXED_SECTION(section_name, start_address, end_address)
+ * FIXED_SECTION_ENTRY_BEGIN(section_name, label1)
+ *
+ * USE_FIXED_SECTION(section_name)
+ * label3:
+ * li r10,128
+ *     mr    r11,r10
+ *
+ * FIXED_SECTION_ENTRY_BEGIN_LOCATION(section_name, label2, start_address, size)
+ * FIXED_SECTION_ENTRY_END_LOCATION(section_name, label2, start_address, size)
+ * CLOSE_FIXED_SECTION(section_name)
+ *
+ * ZERO_FIXED_SECTION can be used to emit zeroed data.
+ *
+ * Troubleshooting:
+ * - If the build dies with "Error: attempt to move .org backwards" at
+ * CLOSE_FIXED_SECTION() or elsewhere, there may be something
+ * unexpected being added there. Remove the '. = x_len' line, rebuild, and
+ * check what is pushing the section down.
+ * - If the build dies in linking, check arch/powerpc/tools/head_check.sh
+ * comments.
+ * - If the kernel crashes or hangs in very early boot, it could be linker
+ * stubs at the start of the main text.
+ */
+
+#define OPEN_FIXED_SECTION(sname, start, end) \
+ sname##_start = (start); \
+ sname##_end = (end); \
+ sname##_len = (end) - (start); \
+ define_ftsec sname; \
+ . = 0x0; \
+start_##sname:
+
+/*
+ * .linker_stub_catch section is used to catch linker stubs from being
+ * inserted in our .text section, above the start_text label (which breaks
+ * the ABS_ADDR calculation). See kernel/vmlinux.lds.S and tools/head_check.sh
+ * for more details. We would prefer to just keep a cacheline (0x80), but
+ * 0x100 seems to be how the linker aligns branch stub groups.
+ */
+#ifdef CONFIG_LD_HEAD_STUB_CATCH
+#define OPEN_TEXT_SECTION(start) \
+ .section ".linker_stub_catch","ax",@progbits; \
+linker_stub_catch: \
+ . = 0x4; \
+ text_start = (start) + 0x100; \
+ .section ".text","ax",@progbits; \
+ .balign 0x100; \
+start_text:
+#else
+#define OPEN_TEXT_SECTION(start) \
+ text_start = (start); \
+ .section ".text","ax",@progbits; \
+ . = 0x0; \
+start_text:
+#endif
+
+#define ZERO_FIXED_SECTION(sname, start, end) \
+ sname##_start = (start); \
+ sname##_end = (end); \
+ sname##_len = (end) - (start); \
+ define_data_ftsec sname; \
+ . = 0x0; \
+ . = sname##_len;
+
+#define USE_FIXED_SECTION(sname) \
+ use_ftsec sname;
+
+#define USE_TEXT_SECTION() \
+ .text
+
+#define CLOSE_FIXED_SECTION(sname) \
+ USE_FIXED_SECTION(sname); \
+ . = sname##_len; \
+end_##sname:
+
+
+#define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align) \
+ USE_FIXED_SECTION(sname); \
+ .balign __align; \
+ .global name; \
+name:
+
+#define FIXED_SECTION_ENTRY_BEGIN(sname, name) \
+ __FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES)
+
+#define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start, size) \
+ USE_FIXED_SECTION(sname); \
+ name##_start = (start); \
+ .if ((start) % (size) != 0); \
+ .error "Fixed section exception vector misalignment"; \
+ .endif; \
+ .if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100) && ((size) != 0x1000); \
+ .error "Fixed section exception vector bad size"; \
+ .endif; \
+ .if (start) < sname##_start; \
+ .error "Fixed section underflow"; \
+ .abort; \
+ .endif; \
+ . = (start) - sname##_start; \
+ .global name; \
+name:
+
+#define FIXED_SECTION_ENTRY_END_LOCATION(sname, name, start, size) \
+ .if (start) + (size) > sname##_end; \
+ .error "Fixed section overflow"; \
+ .abort; \
+ .endif; \
+ .if (. - name > (start) + (size) - name##_start); \
+ .error "Fixed entry overflow"; \
+ .abort; \
+ .endif; \
+ . = ((start) + (size) - sname##_start); \
+
+
+/*
+ * These macros are used to change symbols in other fixed sections to be
+ * absolute or related to our current fixed section.
+ *
+ * - DEFINE_FIXED_SYMBOL / FIXED_SYMBOL_ABS_ADDR is used to find the
+ * absolute address of a symbol within a fixed section, from any section.
+ *
+ * - ABS_ADDR is used to find the absolute address of any symbol, from within
+ * a fixed section.
+ */
+// define label as being _in_ sname
+#define DEFINE_FIXED_SYMBOL(label, sname) \
+ label##_absolute = (label - start_ ## sname + sname ## _start)
+
+#define FIXED_SYMBOL_ABS_ADDR(label) \
+ (label##_absolute)
+
+// find label from _within_ sname
+#define ABS_ADDR(label, sname) (label - start_ ## sname + sname ## _start)
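+
+// Hedged usage sketch (label and section names are hypothetical):
+//
+//	USE_FIXED_SECTION(virt_vectors)
+//	my_handler:
+//		...
+//	DEFINE_FIXED_SYMBOL(my_handler, virt_vectors)
+//
+//	/* then, from any section: */
+//	.long FIXED_SYMBOL_ABS_ADDR(my_handler)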
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/heathrow.h b/arch/powerpc/include/asm/heathrow.h
new file mode 100644
index 000000000..8bc5b1687
--- /dev/null
+++ b/arch/powerpc/include/asm/heathrow.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HEATHROW_H
+#define _ASM_POWERPC_HEATHROW_H
+#ifdef __KERNEL__
+/*
+ * heathrow.h: definitions for using the "Heathrow" I/O controller chip.
+ *
+ * Grabbed from Open Firmware definitions on a PowerBook G3 Series
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+
+/* Front light color on Yikes/B&W G3. 32 bits */
+#define HEATHROW_FRONT_LIGHT 0x32 /* (set to 0 or 0xffffffff) */
+
+/* Brightness/contrast (gossamer iMac ?). 8 bits */
+#define HEATHROW_BRIGHTNESS_CNTL 0x32
+#define HEATHROW_CONTRAST_CNTL 0x33
+
+/* offset from ohare base for feature control register */
+#define HEATHROW_MBCR 0x34 /* Media bay control */
+#define HEATHROW_FCR 0x38 /* Feature control */
+#define HEATHROW_AUX_CNTL_REG 0x3c /* Aux control */
+
+/*
+ * Bits in feature control register.
+ * Bits postfixed with a _N are in inverse logic
+ */
+#define HRW_SCC_TRANS_EN_N 0x00000001 /* Also controls modem power */
+#define HRW_BAY_POWER_N 0x00000002
+#define HRW_BAY_PCI_ENABLE 0x00000004
+#define HRW_BAY_IDE_ENABLE 0x00000008
+#define HRW_BAY_FLOPPY_ENABLE 0x00000010
+#define HRW_IDE0_ENABLE 0x00000020
+#define HRW_IDE0_RESET_N 0x00000040
+#define HRW_BAY_DEV_MASK 0x0000001c
+#define HRW_BAY_RESET_N 0x00000080
+#define HRW_IOBUS_ENABLE 0x00000100 /* Internal IDE ? */
+#define HRW_SCC_ENABLE 0x00000200
+#define HRW_MESH_ENABLE 0x00000400
+#define HRW_SWIM_ENABLE 0x00000800
+#define HRW_SOUND_POWER_N 0x00001000
+#define HRW_SOUND_CLK_ENABLE 0x00002000
+#define HRW_SCCA_IO 0x00004000
+#define HRW_SCCB_IO 0x00008000
+#define HRW_PORT_OR_DESK_VIA_N 0x00010000 /* This one is 0 on PowerBook */
+#define HRW_PWM_MON_ID_N 0x00020000 /* ??? (0) */
+#define HRW_HOOK_MB_CNT_N 0x00040000 /* ??? (0) */
+#define HRW_SWIM_CLONE_FLOPPY 0x00080000 /* ??? (0) */
+#define HRW_AUD_RUN22 0x00100000 /* ??? (1) */
+#define HRW_SCSI_LINK_MODE 0x00200000 /* Read ??? (1) */
+#define HRW_ARB_BYPASS			0x00400000	/* Disable internal PCI arbiter */
+#define HRW_IDE1_RESET_N 0x00800000 /* Media bay */
+#define HRW_SLOW_SCC_PCLK 0x01000000 /* ??? (0) */
+#define HRW_RESET_SCC 0x02000000
+#define HRW_MFDC_CELL_ENABLE 0x04000000 /* ??? (0) */
+#define HRW_USE_MFDC 0x08000000 /* ??? (0) */
+#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */
+#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */
+
+/* We OR those features at boot on desktop G3s */
+#define HRW_DEFAULTS (HRW_SCCA_IO | HRW_SCCB_IO | HRW_SCC_ENABLE)
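+
+/*
+ * A hedged sketch of how that looks (accessor and variable names are
+ * illustrative, not the actual platform code):
+ *
+ *	out_le32(fcr, in_le32(fcr) | HRW_DEFAULTS);
+ */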
+
+/* Looks like Heathrow has some sort of GPIOs as well... */
+#define HRW_GPIO_MODEM_RESET 0x6d
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HEATHROW_H */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
new file mode 100644
index 000000000..c0fcd1bbd
--- /dev/null
+++ b/arch/powerpc/include/asm/highmem.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/fixmap.h>
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * We use one full pte table with 4K pages. With 16K/64K/256K pages, the pte
+ * table covers enough memory (32MB/512MB/2GB respectively), so that both FIXMAP
+ * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
+ * in case of 16K/64K/256K page sizes.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER PTE_SHIFT
+#else
+#define PKMAP_ORDER 9
+#endif
+#define LAST_PKMAP (1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
+#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
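+
+/*
+ * PKMAP_NR() turns a pkmap virtual address into an index into
+ * pkmap_page_table, and PKMAP_ADDR() is its inverse, so
+ * PKMAP_ADDR(PKMAP_NR(vaddr)) recovers the page-aligned address for
+ * any vaddr inside the pkmap window.
+ */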
+
+#define flush_cache_kmaps() flush_cache_all()
+
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ __set_pte_at(mm, vaddr, ptep, ptev, 1)
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_page(NULL, vaddr)
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_page(NULL, vaddr)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/powerpc/include/asm/hmi.h b/arch/powerpc/include/asm/hmi.h
new file mode 100644
index 000000000..155748460
--- /dev/null
+++ b/arch/powerpc/include/asm/hmi.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hypervisor Maintenance Interrupt header file.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_HMI_H__
+#define __ASM_PPC64_HMI_H__
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+#define CORE_TB_RESYNC_REQ_BIT 63
+#define MAX_SUBCORE_PER_CORE 4
+
+/*
+ * sibling_subcore_state structure is used to co-ordinate all threads
+ * during HMI to avoid TB corruption. This structure is allocated once
+ * per each core and shared by all threads on that core.
+ */
+struct sibling_subcore_state {
+ unsigned long flags;
+ u8 in_guest[MAX_SUBCORE_PER_CORE];
+};
+
+extern void wait_for_subcore_guest_exit(void);
+extern void wait_for_tb_resync(void);
+#else
+static inline void wait_for_subcore_guest_exit(void) { }
+static inline void wait_for_tb_resync(void) { }
+#endif
+
+struct pt_regs;
+extern long hmi_handle_debugtrig(struct pt_regs *regs);
+
+#endif /* __ASM_PPC64_HMI_H__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
new file mode 100644
index 000000000..ea71f7245
--- /dev/null
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#ifdef CONFIG_HUGETLB_PAGE
+#include <asm/page.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/hugetlb.h>
+#elif defined(CONFIG_PPC_E500)
+#include <asm/nohash/hugetlb-e500.h>
+#elif defined(CONFIG_PPC_8xx)
+#include <asm/nohash/32/hugetlb-8xx.h>
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+extern bool hugetlb_disabled;
+
+void __init hugetlbpage_init_defaultsize(void);
+
+int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+ unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
+ return slice_is_hugepage_only_range(mm, addr, len);
+ return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+
+#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_hugetlb_page(vma, addr);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+
+void gigantic_hugetlb_cma_reserve(void) __init;
+#include <asm-generic/hugetlb.h>
+
+#else /* ! CONFIG_HUGETLB_PAGE */
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+#define hugepd_shift(x) 0
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned pdshift)
+{
+ return NULL;
+}
+
+
+static inline void __init gigantic_hugetlb_cma_reserve(void)
+{
+}
+
+static inline void __init hugetlbpage_init_defaultsize(void)
+{
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
new file mode 100644
index 000000000..95fd7f948
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -0,0 +1,672 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HVCALL_H
+#define _ASM_POWERPC_HVCALL_H
+#ifdef __KERNEL__
+
+#define HVSC .long 0x44000022
+
+#define H_SUCCESS 0
+#define H_BUSY 1 /* Hardware busy -- retry later */
+#define H_CLOSED 2 /* Resource closed */
+#define H_NOT_AVAILABLE 3
+#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */
+#define H_PARTIAL 5
+#define H_IN_PROGRESS 14 /* Kind of like busy */
+#define H_PAGE_REGISTERED 15
+#define H_PARTIAL_STORE 16
+#define H_PENDING 17 /* returned from H_POLL_PENDING */
+#define H_CONTINUE 18 /* Returned from H_Join on success */
+#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */
+#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
+ is a good time to retry */
+#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD 9999
+
+#define H_HARDWARE -1 /* Hardware error */
+#define H_FUNCTION -2 /* Function not supported */
+#define H_PRIVILEGE -3 /* Caller not privileged */
+#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */
+#define H_BAD_MODE -5 /* Illegal msr value */
+#define H_PTEG_FULL -6 /* PTEG is full */
+#define H_NOT_FOUND	-7	/* PTE was not found */
+#define H_RESERVED_DABR	-8	/* DABR address is reserved by the hypervisor on this processor */
+#define H_NO_MEM -9
+#define H_AUTHORITY -10
+#define H_PERMISSION -11
+#define H_DROPPED -12
+#define H_SOURCE_PARM -13
+#define H_DEST_PARM -14
+#define H_REMOTE_PARM -15
+#define H_RESOURCE -16
+#define H_ADAPTER_PARM -17
+#define H_RH_PARM -18
+#define H_RCQ_PARM -19
+#define H_SCQ_PARM -20
+#define H_EQ_PARM -21
+#define H_RT_PARM -22
+#define H_ST_PARM -23
+#define H_SIGT_PARM -24
+#define H_TOKEN_PARM -25
+#define H_MLENGTH_PARM -27
+#define H_MEM_PARM -28
+#define H_MEM_ACCESS_PARM -29
+#define H_ATTR_PARM -30
+#define H_PORT_PARM -31
+#define H_MCG_PARM -32
+#define H_VL_PARM -33
+#define H_TSIZE_PARM -34
+#define H_TRACE_PARM -35
+
+#define H_MASK_PARM -37
+#define H_MCG_FULL -38
+#define H_ALIAS_EXIST -39
+#define H_P_COUNTER -40
+#define H_TABLE_FULL -41
+#define H_ALT_TABLE -42
+#define H_MR_CONDITION -43
+#define H_NOT_ENOUGH_RESOURCES -44
+#define H_R_STATE -45
+#define H_RESCINDED -46
+#define H_ABORTED -54
+#define H_P2 -55
+#define H_P3 -56
+#define H_P4 -57
+#define H_P5 -58
+#define H_P6 -59
+#define H_P7 -60
+#define H_P8 -61
+#define H_P9 -62
+#define H_NOOP -63
+#define H_TOO_BIG -64
+#define H_UNSUPPORTED -67
+#define H_OVERLAP -68
+#define H_INTERRUPT -69
+#define H_BAD_DATA -70
+#define H_NOT_ACTIVE -71
+#define H_SG_LIST -72
+#define H_OP_MODE -73
+#define H_COP_HW -74
+#define H_STATE -75
+#define H_IN_USE -77
+#define H_UNSUPPORTED_FLAG_START -256
+#define H_UNSUPPORTED_FLAG_END -511
+#define H_MULTI_THREADS_ACTIVE -9005
+#define H_OUTSTANDING_COP_OPS -9006
+
+
+/* Long Busy is a condition that can be returned by the firmware
+ * when a call cannot be completed now, but the identical call
+ * should be retried later. This prevents calls blocking in the
+ * firmware for long periods of time. Annoyingly the firmware can return
+ * a range of return codes, hinting at how long we should wait before
+ * retrying. If you don't care for the hint, the macro below is a good
+ * way to check for the long_busy return codes.
+ */
+#define H_IS_LONG_BUSY(x)  (((x) >= H_LONG_BUSY_START_RANGE) \
+				&& ((x) <= H_LONG_BUSY_END_RANGE))
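+
+/*
+ * A minimal retry sketch (the hcall and delay policy are illustrative
+ * only, not a fixed API):
+ *
+ *	do {
+ *		rc = plpar_hcall_norets(H_SOME_HCALL, arg);
+ *		if (H_IS_LONG_BUSY(rc))
+ *			mdelay(get_longbusy_msecs(rc));
+ *	} while (H_IS_LONG_BUSY(rc));
+ */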
+
+/* Flags */
+#define H_LARGE_PAGE (1UL<<(63-16))
+#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */
+#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */
+#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */
+#define H_PAGE_STATE_CHANGE (1UL<<(63-28))
+#define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30)))
+#define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED)
+#define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31)))
+#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
+#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
+#define H_ANDCOND (1UL<<(63-33))
+#define H_LOCAL (1UL<<(63-35))
+#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
+#define H_ICACHE_SYNCHRONIZE	(1UL<<(63-41))	/* dcbst, icbi, etc (ignored for IO pages) */
+#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
+#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
+#define H_COPY_PAGE (1UL<<(63-49))
+#define H_N (1UL<<(63-61))
+#define H_PP1 (1UL<<(63-62))
+#define H_PP2 (1UL<<(63-63))
+
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT (63-18) /* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK 7UL
+#define H_VPA_REG_VPA 1UL /* Register Virtual Processor Area */
+#define H_VPA_REG_DTL 2UL /* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB 3UL /* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA 5UL /* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL 6UL /* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB 7UL /* Deregister SLB shadow buffer */
+
+/* VASI States */
+#define H_VASI_INVALID 0
+#define H_VASI_ENABLED 1
+#define H_VASI_ABORTED 2
+#define H_VASI_SUSPENDING 3
+#define H_VASI_SUSPENDED 4
+#define H_VASI_RESUMED 5
+#define H_VASI_COMPLETED 6
+
+/* VASI signal codes. Only the Cancel code is valid for H_VASI_SIGNAL. */
+#define H_VASI_SIGNAL_CANCEL 1
+#define H_VASI_SIGNAL_ABORT 2
+#define H_VASI_SIGNAL_SUSPEND 3
+#define H_VASI_SIGNAL_COMPLETE 4
+#define H_VASI_SIGNAL_ENABLE 5
+#define H_VASI_SIGNAL_FAILOVER 6
+
+/* Each control block has to be on a 4K boundary */
+#define H_CB_ALIGNMENT 4096
+
+/* pSeries hypervisor opcodes */
+#define H_REMOVE 0x04
+#define H_ENTER 0x08
+#define H_READ 0x0c
+#define H_CLEAR_MOD 0x10
+#define H_CLEAR_REF 0x14
+#define H_PROTECT 0x18
+#define H_GET_TCE 0x1c
+#define H_PUT_TCE 0x20
+#define H_SET_SPRG0 0x24
+#define H_SET_DABR 0x28
+#define H_PAGE_INIT 0x2c
+#define H_SET_ASR 0x30
+#define H_ASR_ON 0x34
+#define H_ASR_OFF 0x38
+#define H_LOGICAL_CI_LOAD 0x3c
+#define H_LOGICAL_CI_STORE 0x40
+#define H_LOGICAL_CACHE_LOAD 0x44
+#define H_LOGICAL_CACHE_STORE 0x48
+#define H_LOGICAL_ICBI 0x4c
+#define H_LOGICAL_DCBF 0x50
+#define H_GET_TERM_CHAR 0x54
+#define H_PUT_TERM_CHAR 0x58
+#define H_REAL_TO_LOGICAL 0x5c
+#define H_HYPERVISOR_DATA 0x60
+#define H_EOI 0x64
+#define H_CPPR 0x68
+#define H_IPI 0x6c
+#define H_IPOLL 0x70
+#define H_XIRR 0x74
+#define H_PERFMON 0x7c
+#define H_MIGRATE_DMA 0x78
+#define H_REGISTER_VPA 0xDC
+#define H_CEDE 0xE0
+#define H_CONFER 0xE4
+#define H_PROD 0xE8
+#define H_GET_PPP 0xEC
+#define H_SET_PPP 0xF0
+#define H_PURR 0xF4
+#define H_PIC 0xF8
+#define H_REG_CRQ 0xFC
+#define H_FREE_CRQ 0x100
+#define H_VIO_SIGNAL 0x104
+#define H_SEND_CRQ 0x108
+#define H_COPY_RDMA 0x110
+#define H_REGISTER_LOGICAL_LAN 0x114
+#define H_FREE_LOGICAL_LAN 0x118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
+#define H_SEND_LOGICAL_LAN 0x120
+#define H_BULK_REMOVE 0x124
+#define H_MULTICAST_CTRL 0x130
+#define H_SET_XDABR 0x134
+#define H_STUFF_TCE 0x138
+#define H_PUT_TCE_INDIRECT 0x13C
+#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
+#define H_VTERM_PARTNER_INFO 0x150
+#define H_REGISTER_VTERM 0x154
+#define H_FREE_VTERM 0x158
+#define H_RESET_EVENTS 0x15C
+#define H_ALLOC_RESOURCE 0x160
+#define H_FREE_RESOURCE 0x164
+#define H_MODIFY_QP 0x168
+#define H_QUERY_QP 0x16C
+#define H_REREGISTER_PMR 0x170
+#define H_REGISTER_SMR 0x174
+#define H_QUERY_MR 0x178
+#define H_QUERY_MW 0x17C
+#define H_QUERY_HCA 0x180
+#define H_QUERY_PORT 0x184
+#define H_MODIFY_PORT 0x188
+#define H_DEFINE_AQP1 0x18C
+#define H_GET_TRACE_BUFFER 0x190
+#define H_DEFINE_AQP0 0x194
+#define H_RESIZE_MR 0x198
+#define H_ATTACH_MCQP 0x19C
+#define H_DETACH_MCQP 0x1A0
+#define H_CREATE_RPT 0x1A4
+#define H_REMOVE_RPT 0x1A8
+#define H_REGISTER_RPAGES 0x1AC
+#define H_DISABLE_AND_GET 0x1B0
+#define H_ERROR_DATA 0x1B4
+#define H_GET_HCA_INFO 0x1B8
+#define H_GET_PERF_COUNT 0x1BC
+#define H_MANAGE_TRACE 0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
+#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+#define H_QUERY_INT_STATE 0x1E4
+#define H_POLL_PENDING 0x1D8
+#define H_ILLAN_ATTRIBUTES 0x244
+#define H_MODIFY_HEA_QP 0x250
+#define H_QUERY_HEA_QP 0x254
+#define H_QUERY_HEA 0x258
+#define H_QUERY_HEA_PORT 0x25C
+#define H_MODIFY_HEA_PORT 0x260
+#define H_REG_BCMC 0x264
+#define H_DEREG_BCMC 0x268
+#define H_REGISTER_HEA_RPAGES 0x26C
+#define H_DISABLE_AND_GET_HEA 0x270
+#define H_GET_HEA_INFO 0x274
+#define H_ALLOC_HEA_RESOURCE 0x278
+#define H_ADD_CONN 0x284
+#define H_DEL_CONN 0x288
+#define H_JOIN 0x298
+#define H_VASI_SIGNAL 0x2A0
+#define H_VASI_STATE 0x2A4
+#define H_VIOCTL 0x2A8
+#define H_ENABLE_CRQ 0x2B0
+#define H_GET_EM_PARMS 0x2B8
+#define H_SET_MPP 0x2D0
+#define H_GET_MPP 0x2D4
+#define H_REG_SUB_CRQ 0x2DC
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_FREE_SUB_CRQ 0x2E0
+#define H_SEND_SUB_CRQ 0x2E4
+#define H_SEND_SUB_CRQ_INDIRECT 0x2E8
+#define H_BEST_ENERGY 0x2F4
+#define H_XIRR_X 0x2FC
+#define H_RANDOM 0x300
+#define H_COP 0x304
+#define H_GET_MPP_X 0x314
+#define H_SET_MODE 0x31C
+#define H_BLOCK_REMOVE 0x328
+#define H_CLEAR_HPT 0x358
+#define H_REQUEST_VMC 0x360
+#define H_RESIZE_HPT_PREPARE 0x36C
+#define H_RESIZE_HPT_COMMIT 0x370
+#define H_REGISTER_PROC_TBL 0x37C
+#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
+#define H_INT_GET_SOURCE_INFO 0x3A8
+#define H_INT_SET_SOURCE_CONFIG 0x3AC
+#define H_INT_GET_SOURCE_CONFIG 0x3B0
+#define H_INT_GET_QUEUE_INFO 0x3B4
+#define H_INT_SET_QUEUE_CONFIG 0x3B8
+#define H_INT_GET_QUEUE_CONFIG 0x3BC
+#define H_INT_SET_OS_REPORTING_LINE 0x3C0
+#define H_INT_GET_OS_REPORTING_LINE 0x3C4
+#define H_INT_ESB 0x3C8
+#define H_INT_SYNC 0x3CC
+#define H_INT_RESET 0x3D0
+#define H_SCM_READ_METADATA 0x3E4
+#define H_SCM_WRITE_METADATA 0x3E8
+#define H_SCM_BIND_MEM 0x3EC
+#define H_SCM_UNBIND_MEM 0x3F0
+#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
+#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
+#define H_SCM_UNBIND_ALL 0x3FC
+#define H_SCM_HEALTH 0x400
+#define H_SCM_PERFORMANCE_STATS 0x418
+#define H_PKS_GET_CONFIG 0x41C
+#define H_PKS_SET_PASSWORD 0x420
+#define H_PKS_GEN_PASSWORD 0x424
+#define H_PKS_WRITE_OBJECT 0x42C
+#define H_PKS_GEN_KEY 0x430
+#define H_PKS_READ_OBJECT 0x434
+#define H_PKS_REMOVE_OBJECT 0x438
+#define H_PKS_CONFIRM_OBJECT_FLUSHED 0x43C
+#define H_RPT_INVALIDATE 0x448
+#define H_SCM_FLUSH 0x44C
+#define H_GET_ENERGY_SCALE_INFO 0x450
+#define H_WATCHDOG 0x45C
+#define MAX_HCALL_OPCODE H_WATCHDOG
+
+/* Scope args for H_SCM_UNBIND_ALL */
+#define H_UNBIND_SCOPE_ALL (0x1)
+#define H_UNBIND_SCOPE_DRC (0x2)
+
+/* H_VIOCTL functions */
+#define H_GET_VIOA_DUMP_SIZE 0x01
+#define H_GET_VIOA_DUMP 0x02
+#define H_GET_ILLAN_NUM_VLAN_IDS 0x03
+#define H_GET_ILLAN_VLAN_ID_LIST 0x04
+#define H_GET_ILLAN_SWITCH_ID 0x05
+#define H_DISABLE_MIGRATION 0x06
+#define H_ENABLE_MIGRATION 0x07
+#define H_GET_PARTNER_INFO 0x08
+#define H_GET_PARTNER_WWPN_LIST 0x09
+#define H_DISABLE_ALL_VIO_INTS 0x0A
+#define H_DISABLE_VIO_INTERRUPT 0x0B
+#define H_ENABLE_VIO_INTERRUPT 0x0C
+#define H_GET_SESSION_TOKEN 0x19
+#define H_SESSION_ERR_DETECTED 0x1A
+
+
+/* Platform specific hcalls, used by KVM */
+#define H_RTAS 0xf000
+
+/*
+ * Platform specific hcalls, used by QEMU/SLOF. These are ignored by
+ * KVM and only kept here so we can identify them during tracing.
+ */
+#define H_LOGICAL_MEMOP 0xF001
+#define H_CAS			0xF002
+#define H_UPDATE_DT		0xF003
+
+/* "Platform specific hcalls", provided by PHYP */
+#define H_GET_24X7_CATALOG_PAGE 0xF078
+#define H_GET_24X7_DATA 0xF07C
+#define H_GET_PERF_COUNTER_INFO 0xF080
+
+/* Platform-specific hcalls used for nested HV KVM */
+#define H_SET_PARTITION_TABLE 0xF800
+#define H_ENTER_NESTED 0xF804
+#define H_TLB_INVALIDATE 0xF808
+#define H_COPY_TOFROM_GUEST 0xF80C
+
+/* Flags for H_SVM_PAGE_IN */
+#define H_PAGE_IN_SHARED 0x1
+
+/* Platform-specific hcalls used by the Ultravisor */
+#define H_SVM_PAGE_IN 0xEF00
+#define H_SVM_PAGE_OUT 0xEF04
+#define H_SVM_INIT_START 0xEF08
+#define H_SVM_INIT_DONE 0xEF0C
+#define H_SVM_INIT_ABORT 0xEF14
+
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR 1
+#define H_SET_MODE_RESOURCE_SET_DAWR0 2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
+#define H_SET_MODE_RESOURCE_LE 4
+#define H_SET_MODE_RESOURCE_SET_DAWR1 5
+
+/* Values for argument to H_SIGNAL_SYS_RESET */
+#define H_SIGNAL_SYS_RESET_ALL -1
+#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
+/* >= 0 values are CPU number */
+
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
+#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
+#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
+#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
+#define H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST (1ull << 52) // IBM bit 11
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
+#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
+
+/* Flag values used in H_REGISTER_PROC_TBL hcall */
+#define PROC_TABLE_OP_MASK 0x18
+#define PROC_TABLE_DEREG 0x10
+#define PROC_TABLE_NEW 0x18
+#define PROC_TABLE_TYPE_MASK 0x06
+#define PROC_TABLE_HPT_SLB 0x00
+#define PROC_TABLE_HPT_PT 0x02
+#define PROC_TABLE_RADIX 0x04
+#define PROC_TABLE_GTSE 0x01
+
+/*
+ * Defines for
+ * H_RPT_INVALIDATE - Invalidate RPT translation lookaside information.
+ */
+
+/* Type of translation to invalidate (type) */
+#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
+#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
+#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+#define H_RPTI_TYPE_PRT 0x0008
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+#define H_RPTI_TYPE_PAT 0x0008
+#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
+ H_RPTI_TYPE_PRT)
+#define H_RPTI_TYPE_NESTED_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
+ H_RPTI_TYPE_PAT)
+
+/* Invalidation targets (target) */
+#define H_RPTI_TARGET_CMMU 0x01 /* All virtual processors in the partition */
+#define H_RPTI_TARGET_CMMU_LOCAL 0x02 /* Current virtual processor */
+/* All nest/accelerator agents in use by the partition */
+#define H_RPTI_TARGET_NMMU 0x04
+
+/* Page size mask (page sizes) */
+#define H_RPTI_PAGE_4K 0x01
+#define H_RPTI_PAGE_64K 0x02
+#define H_RPTI_PAGE_2M 0x04
+#define H_RPTI_PAGE_1G 0x08
+#define H_RPTI_PAGE_ALL (-1UL)
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/**
+ * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+ * @opcode: The hypervisor call to make.
+ *
+ * This call supports up to 7 arguments and only returns the status of
+ * the hcall. Use this version where possible; it's slightly faster than
+ * the other plpar_hcalls.
+ */
+long plpar_hcall_norets(unsigned long opcode, ...);
+
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
+/**
+ * plpar_hcall: - Make a pseries hypervisor call
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+ */
+#define PLPAR_HCALL_BUFSIZE 4
+long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
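+
+/*
+ * Hedged usage sketch (H_GET_TERM_CHAR chosen purely for illustration):
+ *
+ *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ *	long rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
+ *
+ * On H_SUCCESS, retbuf[0] holds the length and retbuf[1..2] the data.
+ */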
+
+/**
+ * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used when phyp interface needs to be called in real mode. Similar to
+ * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+ * calculate hypervisor call statistics.
+ */
+long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+/**
+ * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 9 return arguments in.
+ *
+ * This call supports up to 9 arguments and 9 return arguments. Use
+ * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+ */
+#define PLPAR_HCALL9_BUFSIZE 9
+long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+/* pseries hcall tracing */
+extern struct static_key hcall_tracepoint_key;
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
+
+struct hvcall_mpp_data {
+ unsigned long entitled_mem;
+ unsigned long mapped_mem;
+ unsigned short group_num;
+ unsigned short pool_num;
+ unsigned char mem_weight;
+ unsigned char unallocated_mem_weight;
+ unsigned long unallocated_entitlement; /* value in bytes */
+ unsigned long pool_size;
+ signed long loan_request;
+ unsigned long backing_mem;
+};
+
+int h_get_mpp(struct hvcall_mpp_data *);
+
+struct hvcall_mpp_x_data {
+ unsigned long coalesced_bytes;
+ unsigned long pool_coalesced_bytes;
+ unsigned long pool_purr_cycles;
+ unsigned long pool_spurr_cycles;
+ unsigned long reserved[3];
+};
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
+
+static inline unsigned int get_longbusy_msecs(int longbusy_rc)
+{
+ switch (longbusy_rc) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
+
+struct h_cpu_char_result {
+ u64 character;
+ u64 behaviour;
+};
+
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New members must be added at the end.
+ */
+struct hv_guest_state {
+ u64 version; /* version of this structure layout, must be first */
+ u32 lpid;
+ u32 vcpu_token;
+ /* These registers are hypervisor privileged (at least for writing) */
+ u64 lpcr;
+ u64 pcr;
+ u64 amor;
+ u64 dpdes;
+ u64 hfscr;
+ s64 tb_offset;
+ u64 dawr0;
+ u64 dawrx0;
+ u64 ciabr;
+ u64 hdec_expiry;
+ u64 purr;
+ u64 spurr;
+ u64 ic;
+ u64 vtb;
+ u64 hdar;
+ u64 hdsisr;
+ u64 heir;
+ u64 asdr;
+ /* These are OS privileged but need to be set late in guest entry */
+ u64 srr0;
+ u64 srr1;
+ u64 sprg[4];
+ u64 pidr;
+ u64 cfar;
+ u64 ppr;
+ /* Version 1 ends here */
+ u64 dawr1;
+ u64 dawrx1;
+ /* Version 2 ends here */
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION 2
+
+static inline int hv_guest_state_size(unsigned int version)
+{
+ switch (version) {
+ case 1:
+ return offsetofend(struct hv_guest_state, ppr);
+ case 2:
+ return offsetofend(struct hv_guest_state, dawrx1);
+ default:
+ return -1;
+ }
+}
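+
+/*
+ * Hedged caller sketch (buffer validation details are an assumption):
+ * a nested-HV host checks a guest-supplied version before touching
+ * later members:
+ *
+ *	int size = hv_guest_state_size(hstate->version);
+ *	if (size < 0)
+ *		return -EINVAL;	// unknown version
+ */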
+
+/*
+ * From the document "H_GetPerformanceCounterInfo Interface" v1.07
+ *
+ * H_GET_PERF_COUNTER_INFO argument
+ */
+struct hv_get_perf_counter_info_params {
+ __be32 counter_request; /* I */
+ __be32 starting_index; /* IO */
+ __be16 secondary_index; /* IO */
+ __be16 returned_values; /* O */
+ __be32 detail_rc; /* O, only needed when called via *_norets() */
+
+ /*
+	 * O, size of each counter_value element in bytes, only set for version
+ * >= 0x3
+ */
+ __be16 cv_element_size;
+
+ /* I, 0 (zero) for versions < 0x3 */
+ __u8 counter_info_version_in;
+
+ /* O, 0 (zero) if version < 0x3. Must be set to 0 when making hcall */
+ __u8 counter_info_version_out;
+ __u8 reserved[0xC];
+ __u8 counter_value[];
+} __packed;
+
+#define HGPCI_REQ_BUFFER_SIZE 4096
+#define HGPCI_MAX_DATA_BYTES \
+ (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
+
+struct hv_gpci_request_buffer {
+ struct hv_get_perf_counter_info_params params;
+ uint8_t bytes[HGPCI_MAX_DATA_BYTES];
+} __packed;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
new file mode 100644
index 000000000..ccb203450
--- /dev/null
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * hvconsole.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * LPAR console support.
+ */
+
+#ifndef _PPC64_HVCONSOLE_H
+#define _PPC64_HVCONSOLE_H
+#ifdef __KERNEL__
+
+/*
+ * PSeries firmware will only send/recv up to 16 bytes of character data per
+ * hcall.
+ */
+#define MAX_VIO_PUT_CHARS 16
+#define SIZE_VIO_GET_CHARS 16
+
+/*
+ * Vio firmware always attempts to fetch SIZE_VIO_GET_CHARS chars. The 'count'
+ * parm is included to conform to the put_chars() function pointer template.
+ */
+extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
+extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+
+/* Provided by HVC VIO */
+void hvc_vio_init_early(void);
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCONSOLE_H */
diff --git a/arch/powerpc/include/asm/hvcserver.h b/arch/powerpc/include/asm/hvcserver.h
new file mode 100644
index 000000000..2b20403e9
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcserver.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * hvcserver.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * PPC64 virtual I/O console server support.
+ */
+
+#ifndef _PPC64_HVCSERVER_H
+#define _PPC64_HVCSERVER_H
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+/* Converged Location Code length */
+#define HVCS_CLC_LENGTH 79
+
+/**
+ * hvcs_partner_info - an element in a list of partner info
+ * @node: list_head denoting this partner_info struct's position in the list of
+ * partner info.
+ * @unit_address: The partner unit address of this entry.
+ * @partition_ID: The partner partition ID of this entry.
+ * @location_code: The converged location code of this entry + 1 char for the
+ * null-term.
+ *
+ * This structure outlines the format in which partner info is presented to
+ * callers of the hvcs partner info fetching functions. Entries are strung
+ * together into a list using standard Linux kernel lists.
+ */
+struct hvcs_partner_info {
+ struct list_head node;
+ uint32_t unit_address;
+ uint32_t partition_ID;
+ char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
+};
+
+extern int hvcs_free_partner_info(struct list_head *head);
+extern int hvcs_get_partner_info(uint32_t unit_address,
+ struct list_head *head, unsigned long *pi_buff);
+extern int hvcs_register_connection(uint32_t unit_address,
+ uint32_t p_partition_ID, uint32_t p_unit_address);
+extern int hvcs_free_connection(uint32_t unit_address);
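+
+/*
+ * Hedged traversal sketch (the list head name is hypothetical): after a
+ * successful hvcs_get_partner_info(), walk the list with the standard
+ * helpers and release it when done:
+ *
+ *	struct hvcs_partner_info *pi;
+ *
+ *	list_for_each_entry(pi, &head, node)
+ *		pr_info("partner 0x%x: %s\n", pi->unit_address,
+ *			pi->location_code);
+ *	hvcs_free_partner_info(&head);
+ */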
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCSERVER_H */
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 000000000..464a7519e
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HVSI_H
+#define _HVSI_H
+
+#define VS_DATA_PACKET_HEADER 0xff
+#define VS_CONTROL_PACKET_HEADER 0xfe
+#define VS_QUERY_PACKET_HEADER 0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL 1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL 3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD 0x20
+
+#define HVSI_MAX_OUTGOING_DATA 12
+#define HVSI_VERSION 1
+
+struct hvsi_header {
+ uint8_t type;
+ uint8_t len;
+ __be16 seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+ struct hvsi_header hdr;
+ uint8_t data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+ struct hvsi_header hdr;
+ __be16 verb;
+ /* optional depending on verb: */
+ __be32 word;
+ __be32 mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+ struct hvsi_header hdr;
+ __be16 verb;
+} __attribute__((packed));
+
+struct hvsi_query_response {
+ struct hvsi_header hdr;
+ __be16 verb;
+ __be16 query_seqno;
+ union {
+ uint8_t version;
+ __be32 mctrl_word;
+ } u;
+} __attribute__((packed));
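+
+/*
+ * Hedged framing sketch (sequence handling is an assumption; the real
+ * drivers track seqno in struct hvsi_priv below). An outgoing data
+ * packet carrying n bytes is built roughly as:
+ *
+ *	struct hvsi_data pkt;
+ *
+ *	pkt.hdr.type = VS_DATA_PACKET_HEADER;
+ *	pkt.hdr.len = sizeof(struct hvsi_header) + n;
+ *	pkt.hdr.seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
+ *	memcpy(pkt.data, buf, n);
+ */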
+
+/* hvsi lib struct definitions */
+#define HVSI_INBUF_SIZE 255
+struct tty_struct;
+struct hvsi_priv {
+ unsigned int inbuf_len; /* data in input buffer */
+ unsigned char inbuf[HVSI_INBUF_SIZE];
+ unsigned int inbuf_cur; /* Cursor in input buffer */
+ unsigned int inbuf_pktlen; /* packet length from cursor */
+ atomic_t seqno; /* packet sequence number */
+ unsigned int opened:1; /* driver opened */
+ unsigned int established:1; /* protocol established */
+ unsigned int is_console:1; /* used as a kernel console device */
+ unsigned int mctrl_update:1; /* modem control updated */
+ unsigned short mctrl; /* modem control */
+ struct tty_struct *tty; /* tty structure */
+ int (*get_chars)(uint32_t termno, char *buf, int count);
+ int (*put_chars)(uint32_t termno, const char *buf, int count);
+ uint32_t termno;
+};
+
+/* hvsi lib functions */
+struct hvc_struct;
+extern void hvsilib_init(struct hvsi_priv *pv,
+ int (*get_chars)(uint32_t termno, char *buf, int count),
+ int (*put_chars)(uint32_t termno, const char *buf,
+ int count),
+ int termno, int is_console);
+extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
+extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
+extern void hvsilib_establish(struct hvsi_priv *pv);
+extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
+extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+
+#endif /* _HVSI_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..84d39fd42
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PowerPC BookIII S hardware breakpoint definitions
+ *
+ * Copyright 2010, IBM Corporation.
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ */
+
+#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
+#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
+
+#include <asm/cpu_has_feature.h>
+
+#ifdef __KERNEL__
+struct arch_hw_breakpoint {
+ unsigned long address;
+ u16 type;
+ u16 len; /* length of the target data symbol */
+ u16 hw_len; /* length programmed in hw */
+ u8 flags;
+};
+
+/* Note: Don't change the first 6 bits below as they are in the same order
+ * as the dabr and dabrx.
+ */
+#define HW_BRK_TYPE_READ 0x01
+#define HW_BRK_TYPE_WRITE 0x02
+#define HW_BRK_TYPE_TRANSLATE 0x04
+#define HW_BRK_TYPE_USER 0x08
+#define HW_BRK_TYPE_KERNEL 0x10
+#define HW_BRK_TYPE_HYP 0x20
+#define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80
+
+/* bits that overlap with the bottom 3 bits of the dabr */
+#define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
+#define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
+#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
+ HW_BRK_TYPE_HYP)
+
+#define HW_BRK_FLAG_DISABLED 0x1
+
+/* Minimum granularity */
+#ifdef CONFIG_PPC_8xx
+#define HW_BREAKPOINT_SIZE 0x4
+#else
+#define HW_BREAKPOINT_SIZE 0x8
+#endif
+#define HW_BREAKPOINT_SIZE_QUADWORD 0x10
+
+#define DABR_MAX_LEN 8
+#define DAWR_MAX_LEN 512
+
+static inline int nr_wp_slots(void)
+{
+ return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
+}
+
+bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
+ unsigned long ea, int type, int size,
+ struct arch_hw_breakpoint *info);
+
+void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
+ int *type, int *size, unsigned long *ea);
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/kdebug.h>
+#include <asm/reg.h>
+#include <asm/debug.h>
+
+struct perf_event_attr;
+struct perf_event;
+struct pmu;
+struct perf_sample_data;
+struct task_struct;
+
+extern int hw_breakpoint_slots(int type);
+extern int arch_bp_generic_fields(int type, int *gen_bp_type);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+extern struct pmu perf_ops_bp;
+extern void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs);
+static inline void hw_breakpoint_disable(void)
+{
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
+
+ if (!ppc_breakpoint_available())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ __set_breakpoint(i, &null_brk);
+}
+extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
+int hw_breakpoint_handler(struct die_args *args);
+
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
+static inline void hw_breakpoint_disable(void) { }
+static inline void thread_change_pc(struct task_struct *tsk,
+ struct pt_regs *regs) { }
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+
+#ifdef CONFIG_PPC_DAWR
+extern bool dawr_force_enable;
+static inline bool dawr_enabled(void)
+{
+ return dawr_force_enable;
+}
+int set_dawr(int nr, struct arch_hw_breakpoint *brk);
+#else
+static inline bool dawr_enabled(void) { return false; }
+static inline int set_dawr(int nr, struct arch_hw_breakpoint *brk) { return -1; }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
new file mode 100644
index 000000000..317659fde
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * These bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
+ */
+#define PACA_IRQ_HARD_DIS 0x01
+#define PACA_IRQ_DBELL 0x02
+#define PACA_IRQ_EE 0x04
+#define PACA_IRQ_DEC 0x08 /* Or FIT */
+#define PACA_IRQ_HMI 0x10
+#define PACA_IRQ_PMI 0x20
+#define PACA_IRQ_REPLAYING 0x40
+
+/*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ * Interrupt replay itself must remain hard masked too.
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
+#else
+#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_REPLAYING)
+#endif
+
+#endif /* CONFIG_PPC64 */
+
+/*
+ * flags for paca->irq_soft_mask
+ */
+#define IRQS_ENABLED 0
+#define IRQS_DISABLED 1 /* local_irq_disable() interrupts */
+#define IRQS_PMI_DISABLED 2
+#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
+
+#ifndef __ASSEMBLY__
+
+static inline void __hard_irq_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(MSR_EE);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EIE);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_EE | MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_EE);
+}
+
+static inline void __hard_irq_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() & ~MSR_EE);
+}
+
+static inline void __hard_EE_RI_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_NRI);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(0, 1);
+ else
+ mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
+}
+
+static inline void __hard_RI_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_RI);
+}
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ "lbz %0,%1(13)"
+ : "=r" (flags)
+ : "i" (offsetof(struct paca_struct, irq_soft_mask)));
+
+ return flags;
+}
+
+/*
+ * The "memory" clobber acts as both a compiler barrier
+ * for the critical section and as a clobber because
+ * we changed paca->irq_soft_mask.
+ */
+static inline notrace void irq_soft_mask_set(unsigned long mask)
+{
+ /*
+	 * The irq mask must always include the STD bit if any are set,
+	 * and interrupts don't get replayed until the standard
+ * interrupt (local_irq_disable()) is unmasked.
+ *
+ * Other masks must only provide additional masking beyond
+ * the standard, and they are also not replayed until the
+ * standard interrupt becomes unmasked.
+ *
+ * This could be changed, but it will require partial
+ * unmasks to be replayed, among other things. For now, take
+ * the simple approach.
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
+
+ asm volatile(
+ "stb %0,%1(13)"
+ :
+ : "r" (mask),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "memory");
+}
+
+static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
+{
+ unsigned long flags = irq_soft_mask_return();
+
+ irq_soft_mask_set(mask);
+
+ return flags;
+}
+
+static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
+{
+ unsigned long flags = irq_soft_mask_return();
+
+ irq_soft_mask_set(flags | mask);
+
+ return flags;
+}
+
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+ unsigned long flags = irq_soft_mask_return();
+
+ irq_soft_mask_set(flags & ~mask);
+
+ return flags;
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return irq_soft_mask_return();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ irq_soft_mask_set(IRQS_DISABLED);
+}
+
+extern void arch_local_irq_restore(unsigned long);
+
+static inline void arch_local_irq_enable(void)
+{
+ arch_local_irq_restore(IRQS_ENABLED);
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ return irq_soft_mask_or_return(IRQS_DISABLED);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return flags & IRQS_DISABLED;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void set_pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to set PMI bit in the paca.
+	 * This has to be called with irqs disabled (via hard_irq_disable()).
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened |= PACA_IRQ_PMI;
+}
+
+static inline void clear_pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to clear the pending PMI bit
+ * in the paca.
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened &= ~PACA_IRQ_PMI;
+}
+
+static inline bool pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to check if there is a pending
+ * PMI bit in the paca.
+ */
+ if (get_paca()->irq_happened & PACA_IRQ_PMI)
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * To support disabling and enabling of irqs with PMIs, a set of new
+ * powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
+ * macros is added. These macros are implemented using the generic
+ * Linux local_irq_* code from include/linux/irqflags.h.
+ */
+#define raw_local_irq_pmu_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = irq_soft_mask_or_return(IRQS_DISABLED | \
+ IRQS_PMI_DISABLED); \
+ } while(0)
+
+#define raw_local_irq_pmu_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ arch_local_irq_restore(flags); \
+ } while(0)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define powerpc_local_irq_pmu_save(flags) \
+ do { \
+ raw_local_irq_pmu_save(flags); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
+ } while(0)
+#define powerpc_local_irq_pmu_restore(flags) \
+ do { \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_on(); \
+ raw_local_irq_pmu_restore(flags); \
+ } while(0)
+#else
+#define powerpc_local_irq_pmu_save(flags) \
+ do { \
+ raw_local_irq_pmu_save(flags); \
+ } while(0)
+#define powerpc_local_irq_pmu_restore(flags) \
+ do { \
+ raw_local_irq_pmu_restore(flags); \
+ } while (0)
+#endif /* CONFIG_TRACE_IRQFLAGS */
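+
+/*
+ * Hedged usage sketch (the surrounding context is an assumption): PMU
+ * code that must also mask PMIs brackets its critical section as
+ *
+ *	unsigned long flags;
+ *
+ *	powerpc_local_irq_pmu_save(flags);
+ *	... update PMU state ...
+ *	powerpc_local_irq_pmu_restore(flags);
+ */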
+
+#endif /* CONFIG_PPC_BOOK3S */
+
+#define hard_irq_disable() do { \
+ unsigned long flags; \
+ __hard_irq_disable(); \
+ flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
+ if (!arch_irqs_disabled_flags(flags)) { \
+ asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
+ : "r" (current_stack_pointer)); \
+ trace_hardirqs_off(); \
+ } \
+} while(0)
+
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+ return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
+static inline bool lazy_irq_pending(void)
+{
+ return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+ return __lazy_irq_pending(local_paca->irq_happened);
+}
+
+bool power_pmu_wants_prompt_pmi(void);
+
+/*
+ * This is called by asynchronous interrupts to check whether to
+ * conditionally re-enable hard interrupts after having cleared
+ * the source of the interrupt. They are kept disabled if there
+ * is a different soft-masked interrupt pending that requires hard
+ * masking.
+ */
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
+ WARN_ON(mfmsr() & MSR_EE);
+ }
+
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return false;
+ /*
+ * If the PMU is not running, there is not much reason to enable
+ * MSR[EE] in irq handlers because any interrupts would just be
+ * soft-masked.
+ *
+ * TODO: Add test for 64e
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (!power_pmu_wants_prompt_pmi())
+ return false;
+ /*
+ * If PMIs are disabled then IRQs should be disabled as well,
+ * so we shouldn't see this condition, check for it just in
+ * case because we are about to enable PMIs.
+ */
+ if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+ return false;
+ }
+
+ if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+ return false;
+
+ return true;
+}
+
+/*
+ * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
+ */
+static inline void do_hard_irq_enable(void)
+{
+ /*
+ * Asynch interrupts come in with IRQS_ALL_DISABLED,
+ * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ __hard_irq_enable();
+}
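+
+/*
+ * Hedged pairing sketch: an asynchronous interrupt handler uses the two
+ * helpers together, e.g.
+ *
+ *	if (should_hard_irq_enable(regs))
+ *		do_hard_irq_enable();
+ */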
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return (regs->softe & IRQS_DISABLED);
+}
+
+extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+extern void irq_set_pending_from_srr1(unsigned long srr1);
+
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
+
+extern void force_external_irq_replay(void);
+
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+ regs->softe = val;
+}
+#else /* CONFIG_PPC64 */
+
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+ return 0;
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return mfmsr();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(flags);
+ else
+ mtmsr(flags);
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else
+ mtmsr(flags & ~MSR_EE);
+
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ __hard_irq_disable();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ __hard_irq_enable();
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & MSR_EE) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#define hard_irq_disable() arch_local_irq_disable()
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !(regs->msr & MSR_EE);
+}
+
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
+
+static inline void clear_pmi_irq_pending(void) { }
+static inline void set_pmi_irq_pending(void) { }
+static inline bool pmi_irq_pending(void) { return false; }
+
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+}
+#endif /* CONFIG_PPC64 */
+
+static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
+{
+#ifdef CONFIG_PPC64
+ if (arch_irqs_disabled()) {
+ /*
+ * With soft-masking, MSR[EE] can change from 1 to 0
+ * asynchronously when irqs are disabled, and we don't want to
+ * set MSR[EE] back to 1 here if that has happened. A race-free
+ * way to do this is ensure EE is already 0. Another way it
+ * could be done is with a RESTART_TABLE handler, but that's
+ * probably overkill here.
+ */
+ msr &= ~MSR_EE;
+ mtmsr_isync(msr);
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else
+#endif
+ mtmsr_isync(msr);
+
+ return msr;
+}
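+
+/*
+ * Illustrative sketch (editorial example; the facility bit is arbitrary):
+ * a caller that temporarily modified the MSR restores the old value with
+ * the irqsafe variant so that a cleared MSR[EE] is never re-enabled:
+ *
+ *	unsigned long msr = mfmsr();
+ *
+ *	mtmsr_isync(msr | MSR_FP);
+ *	...
+ *	mtmsr_isync_irqsafe(msr);
+ */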
+
+
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
new file mode 100644
index 000000000..d02444728
--- /dev/null
+++ b/arch/powerpc/include/asm/hydra.h
@@ -0,0 +1,99 @@
+/*
+ * include/asm-ppc/hydra.h -- Mac I/O `Hydra' definitions
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is based on the following documentation:
+ *
+ * Macintosh Technology in the Common Hardware Reference Platform
+ * Apple Computer, Inc.
+ *
+ * © Copyright 1995 Apple Computer, Inc. All rights reserved.
+ *
+ * It's available online from https://www.cpu.lu/~mlan/ftp/MacTech.pdf
+ * You can obtain paper copies of this book from computer bookstores or by
+ * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San
+ * Francisco, CA 94104. Reference ISBN 1-55860-393-X.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASMPPC_HYDRA_H
+#define _ASMPPC_HYDRA_H
+
+#ifdef __KERNEL__
+
+struct Hydra {
+ /* DBDMA Controller Register Space */
+ char Pad1[0x30];
+ u_int CachePD;
+ u_int IDs;
+ u_int Feature_Control;
+ char Pad2[0x7fc4];
+ /* DBDMA Channel Register Space */
+ char SCSI_DMA[0x100];
+ char Pad3[0x300];
+ char SCCA_Tx_DMA[0x100];
+ char SCCA_Rx_DMA[0x100];
+ char SCCB_Tx_DMA[0x100];
+ char SCCB_Rx_DMA[0x100];
+ char Pad4[0x7800];
+ /* Device Register Space */
+ char SCSI[0x1000];
+ char ADB[0x1000];
+ char SCC_Legacy[0x1000];
+ char SCC[0x1000];
+ char Pad9[0x2000];
+ char VIA[0x2000];
+ char Pad10[0x28000];
+ char OpenPIC[0x40000];
+};
+
+extern volatile struct Hydra __iomem *Hydra;
+
+
+ /*
+ * Feature Control Register
+ */
+
+#define HYDRA_FC_SCC_CELL_EN 0x00000001 /* Enable SCC Clock */
+#define HYDRA_FC_SCSI_CELL_EN 0x00000002 /* Enable SCSI Clock */
+#define HYDRA_FC_SCCA_ENABLE 0x00000004 /* Enable SCC A Lines */
+#define HYDRA_FC_SCCB_ENABLE 0x00000008 /* Enable SCC B Lines */
+#define HYDRA_FC_ARB_BYPASS 0x00000010 /* Bypass Internal Arbiter */
+#define HYDRA_FC_RESET_SCC 0x00000020 /* Reset SCC */
+#define HYDRA_FC_MPIC_ENABLE 0x00000040 /* Enable OpenPIC */
+#define HYDRA_FC_SLOW_SCC_PCLK	0x00000080	/* 1 = 15.6672 MHz, 0 = 25 MHz */
+#define HYDRA_FC_MPIC_IS_MASTER 0x00000100 /* OpenPIC Master Mode */
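+
+/*
+ * Illustrative sketch (editorial example; little-endian register access is
+ * an assumption, following CHRP platform setup): enabling the OpenPIC and
+ * the SCC cell might look like:
+ *
+ *	out_le32(&Hydra->Feature_Control,
+ *		 HYDRA_FC_SCC_CELL_EN | HYDRA_FC_MPIC_ENABLE |
+ *		 HYDRA_FC_MPIC_IS_MASTER);
+ */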
+
+
+ /*
+ * OpenPIC Interrupt Sources
+ */
+
+#define HYDRA_INT_SIO 0
+#define HYDRA_INT_SCSI_DMA 1
+#define HYDRA_INT_SCCA_TX_DMA 2
+#define HYDRA_INT_SCCA_RX_DMA 3
+#define HYDRA_INT_SCCB_TX_DMA 4
+#define HYDRA_INT_SCCB_RX_DMA 5
+#define HYDRA_INT_SCSI 6
+#define HYDRA_INT_SCCA 7
+#define HYDRA_INT_SCCB 8
+#define HYDRA_INT_VIA 9
+#define HYDRA_INT_ADB 10
+#define HYDRA_INT_ADB_NMI 11
+#define HYDRA_INT_EXT1 12 /* PCI IRQW */
+#define HYDRA_INT_EXT2 13 /* PCI IRQX */
+#define HYDRA_INT_EXT3 14 /* PCI IRQY */
+#define HYDRA_INT_EXT4 15 /* PCI IRQZ */
+#define HYDRA_INT_EXT5 16 /* IDE Primary/Secondary */
+#define HYDRA_INT_EXT6 17 /* IDE Secondary */
+#define HYDRA_INT_EXT7 18 /* Power Off Request */
+#define HYDRA_INT_SPARE 19
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
new file mode 100644
index 000000000..75481d363
--- /dev/null
+++ b/arch/powerpc/include/asm/i8259.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_I8259_H
+#define _ASM_POWERPC_I8259_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+
+extern void i8259_init(struct device_node *node, unsigned long intack_addr);
+extern unsigned int i8259_irq(void);
+struct irq_domain *__init i8259_get_host(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
new file mode 100644
index 000000000..088f95b2e
--- /dev/null
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -0,0 +1,60 @@
+/*
+ * IBM PowerPC eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ * Joachim Fenkes <fenkes@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_EBUS_H
+#define _ASM_EBUS_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+extern struct bus_type ibmebus_bus_type;
+
+int ibmebus_register_driver(struct platform_driver *drv);
+void ibmebus_unregister_driver(struct platform_driver *drv);
+
+int ibmebus_request_irq(u32 ist, irq_handler_t handler,
+ unsigned long irq_flags, const char *devname,
+ void *dev_id);
+void ibmebus_free_irq(u32 ist, void *dev_id);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_EBUS_H */
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
new file mode 100644
index 000000000..f6599ccb3
--- /dev/null
+++ b/arch/powerpc/include/asm/icswx.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ICSWX api
+ *
+ * Copyright (C) 2015 IBM Corp.
+ *
+ * This provides the Initiate Coprocessor Store Word Indexed (ICSWX)
+ * instruction. This instruction is used to communicate with PowerPC
+ * coprocessors. This also provides definitions of the structures used
+ * to communicate with the coprocessor.
+ *
+ * The RFC02130: Coprocessor Architecture document is the reference for
+ * everything in this file unless otherwise noted.
+ */
+#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+
+#include <asm/ppc-opcode.h> /* for PPC_ICSWX */
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE (0x3fffffffffffffff)
+#define CCB_ADDRESS (0xfffffffffffffff8)
+#define CCB_CM (0x0000000000000007)
+#define CCB_CM0 (0x0000000000000004)
+#define CCB_CM12 (0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS (0x0)
+#define CCB_CM0_LAST_IN_CHAIN (0x4)
+#define CCB_CM12_STORE (0x0)
+#define CCB_CM12_INTERRUPT (0x1)
+
+#define CCB_SIZE (0x10)
+#define CCB_ALIGN CCB_SIZE
+
+struct coprocessor_completion_block {
+ __be64 value;
+ __be64 address;
+} __packed __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V (0x80)
+#define CSB_F (0x04)
+#define CSB_CH (0x03)
+#define CSB_CE_INCOMPLETE (0x80)
+#define CSB_CE_TERMINATION (0x40)
+#define CSB_CE_TPBC (0x20)
+
+#define CSB_CC_SUCCESS (0)
+#define CSB_CC_INVALID_ALIGN (1)
+#define CSB_CC_OPERAND_OVERLAP (2)
+#define CSB_CC_DATA_LENGTH (3)
+#define CSB_CC_TRANSLATION (5)
+#define CSB_CC_PROTECTION (6)
+#define CSB_CC_RD_EXTERNAL (7)
+#define CSB_CC_INVALID_OPERAND (8)
+#define CSB_CC_PRIVILEGE (9)
+#define CSB_CC_INTERNAL (10)
+#define CSB_CC_WR_EXTERNAL (12)
+#define CSB_CC_NOSPC (13)
+#define CSB_CC_EXCESSIVE_DDE (14)
+#define CSB_CC_WR_TRANSLATION (15)
+#define CSB_CC_WR_PROTECTION (16)
+#define CSB_CC_UNKNOWN_CODE (17)
+#define CSB_CC_ABORT (18)
+#define CSB_CC_EXCEED_BYTE_COUNT (19) /* P9 or later */
+#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_INVALID_CRB (21) /* P9 or later */
+#define CSB_CC_INVALID_DDE (30) /* P9 or later */
+#define CSB_CC_SEGMENTED_DDL (31)
+#define CSB_CC_PROGRESS_POINT (32)
+#define CSB_CC_DDE_OVERFLOW (33)
+#define CSB_CC_SESSION (34)
+#define CSB_CC_PROVISION (36)
+#define CSB_CC_CHAIN (37)
+#define CSB_CC_SEQUENCE (38)
+#define CSB_CC_HW (39)
+/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
+#define CSB_CC_FAULT_ADDRESS (250)
+
+#define CSB_SIZE (0x10)
+#define CSB_ALIGN CSB_SIZE
+
+struct coprocessor_status_block {
+ u8 flags;
+ u8 cs;
+ u8 cc;
+ u8 ce;
+ __be32 count;
+ __be64 address;
+} __packed __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P (0x8000)
+
+#define DDE_SIZE (0x10)
+#define DDE_ALIGN DDE_SIZE
+
+struct data_descriptor_entry {
+ __be16 flags;
+ u8 count;
+ u8 index;
+ __be32 length;
+ __be64 address;
+} __packed __aligned(DDE_ALIGN);
+
+/* 4.3.2 NX-stamped Fault CRB */
+
+#define NX_STAMP_ALIGN (0x10)
+
+struct nx_fault_stamp {
+ __be64 fault_storage_addr;
+ __be16 reserved;
+ __u8 flags;
+ __u8 fault_status;
+ __be32 pswid;
+} __packed __aligned(NX_STAMP_ALIGN);
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE (0x80)
+#define CRB_ALIGN	(0x100)	/* Errata: requires 256-byte alignment */
+
+/* Coprocessor Status Block field
+ * ADDRESS	address of the CSB
+ * C		CCB is valid
+ * AT		0 = addresses are virtual, 1 = addresses are physical
+ * M		enable performance monitor
+ */
+#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
+#define CRB_CSB_C (0x0000000000000008)
+#define CRB_CSB_AT (0x0000000000000002)
+#define CRB_CSB_M (0x0000000000000001)
+
+struct coprocessor_request_block {
+ __be32 ccw;
+ __be32 flags;
+ __be64 csb_addr;
+
+ struct data_descriptor_entry source;
+ struct data_descriptor_entry target;
+
+ struct coprocessor_completion_block ccb;
+
+ union {
+ struct nx_fault_stamp nx;
+ u8 reserved[16];
+ } stamp;
+
+ u8 reserved[32];
+
+ struct coprocessor_status_block csb;
+} __aligned(128);
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS (0xff000000)
+#define CCW_CT (0x00ff0000)
+#define CCW_CD (0x0000ffff)
+#define CCW_CL (0x0000c000)
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX)
+ * Chapter 8.2.4.1 Condition Register 0
+ */
+
+#define ICSWX_INITIATED (0x8)
+#define ICSWX_BUSY (0x4)
+#define ICSWX_REJECTED (0x2)
+#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */
+
+static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
+{
+ __be64 ccw_reg = ccw;
+ u32 cr;
+
+ /* NB: the same structures are used by VAS-NX */
+ BUILD_BUG_ON(sizeof(*crb) != 128);
+
+ __asm__ __volatile__(
+ PPC_ICSWX(%1,0,%2) "\n"
+ "mfcr %0\n"
+ : "=r" (cr)
+ : "r" (ccw_reg), "r" (crb)
+ : "cr0", "memory");
+
+ return (int)((cr >> 28) & 0xf);
+}
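+
+/*
+ * Illustrative sketch (editorial example; "my_ccw" and the CRB contents
+ * are hypothetical): the caller supplies a suitably aligned CRB and a
+ * big-endian CCW:
+ *
+ *	struct coprocessor_request_block crb __aligned(CRB_ALIGN);
+ *	int rc;
+ *
+ *	... fill in crb.source, crb.target, crb.csb_addr ...
+ *	rc = icswx(cpu_to_be32(my_ccw), &crb);
+ *	if (rc != ICSWX_INITIATED)
+ *		... busy, rejected, or undefined: back off and retry ...
+ */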
+
+
+#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */
diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h
new file mode 100644
index 000000000..ce87a4441
--- /dev/null
+++ b/arch/powerpc/include/asm/ide.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * This file contains the powerpc architecture specific IDE code.
+ */
+#ifndef _ASM_POWERPC_IDE_H
+#define _ASM_POWERPC_IDE_H
+
+#include <linux/compiler.h>
+#include <asm/io.h>
+
+#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c))
+
+#endif /* _ASM_POWERPC_IDE_H */
diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
new file mode 100644
index 000000000..accd1f500
--- /dev/null
+++ b/arch/powerpc/include/asm/idle.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_IDLE_H
+#define _ASM_POWERPC_IDLE_H
+#include <asm/runlatch.h>
+#include <asm/paca.h>
+
+#ifdef CONFIG_PPC_PSERIES
+DECLARE_PER_CPU(u64, idle_spurr_cycles);
+DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+DECLARE_PER_CPU(u64, idle_entry_spurr_snap);
+
+static inline void snapshot_purr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
+}
+
+static inline void snapshot_spurr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_spurr_snap) = mfspr(SPRN_SPURR);
+}
+
+static inline void update_idle_purr_accounting(void)
+{
+ u64 wait_cycles;
+ u64 in_purr = *this_cpu_ptr(&idle_entry_purr_snap);
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+}
+
+static inline void update_idle_spurr_accounting(void)
+{
+ u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
+ u64 in_spurr = *this_cpu_ptr(&idle_entry_spurr_snap);
+
+ *idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
+}
+
+static inline void pseries_idle_prolog(void)
+{
+ ppc64_runlatch_off();
+ snapshot_purr_idle_entry();
+ snapshot_spurr_idle_entry();
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline void pseries_idle_epilog(void)
+{
+ update_idle_purr_accounting();
+ update_idle_spurr_accounting();
+ get_lppaca()->idle = 0;
+ ppc64_runlatch_on();
+}
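+
+/*
+ * Illustrative sketch (editorial example): a cpuidle driver brackets its
+ * low-power loop with the prolog/epilog pair so PURR/SPURR accounting and
+ * the lppaca idle hint stay consistent:
+ *
+ *	pseries_idle_prolog();
+ *	while (!need_resched())
+ *		... enter low-power state ...
+ *	pseries_idle_epilog();
+ */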
+
+static inline u64 read_this_idle_purr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-purr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-purr.
+ */
+ if (unlikely(get_lppaca()->idle == 1)) {
+ update_idle_purr_accounting();
+ snapshot_purr_idle_entry();
+ }
+
+ return be64_to_cpu(get_lppaca()->wait_state_cycles);
+}
+
+static inline u64 read_this_idle_spurr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-spurr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-spurr.
+ */
+ if (get_lppaca()->idle == 1) {
+ update_idle_spurr_accounting();
+ snapshot_spurr_idle_entry();
+ }
+
+ return *this_cpu_ptr(&idle_spurr_cycles);
+}
+
+#endif /* CONFIG_PPC_PSERIES */
+#endif
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
new file mode 100644
index 000000000..699a88584
--- /dev/null
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_POWERPC_IMC_PMU_H
+#define __ASM_POWERPC_IMC_PMU_H
+
+/*
+ * IMC Nest Performance Monitor counter support.
+ *
+ * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
+ * (C) 2017 Anju T Sudhakar, IBM Corporation.
+ * (C) 2017 Hemant K Shaw, IBM Corporation.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <asm/opal.h>
+
+/*
+ * Compatibility macros for IMC devices
+ */
+#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters"
+#define IMC_DTB_UNIT_COMPAT "ibm,imc-counters"
+
+
+/*
+ * LDBAR: counter address and enable/disable macros.
+ * perf/imc-pmu.c has the LDBAR layout information.
+ */
+#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL
+#define THREAD_IMC_ENABLE 0x8000000000000000ULL
+#define TRACE_IMC_ENABLE 0x4000000000000000ULL
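+
+/*
+ * Illustrative sketch (editorial example; "counter_area" is a hypothetical
+ * per-thread buffer pointer): programming LDBAR for thread IMC:
+ *
+ *	u64 ldbar = ((u64)counter_area & THREAD_IMC_LDBAR_MASK) |
+ *		    THREAD_IMC_ENABLE;
+ *	mtspr(SPRN_LDBAR, ldbar);
+ */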
+
+/*
+ * Offsets for the debugfs interface to imc-mode and imc-command.
+ */
+#define IMC_CNTL_BLK_OFFSET 0x3FC00
+#define IMC_CNTL_BLK_CMD_OFFSET 8
+#define IMC_CNTL_BLK_MODE_OFFSET 32
+
+/*
+ * Structure to hold memory address information for imc units.
+ */
+struct imc_mem_info {
+ u64 *vbase;
+ u32 id;
+};
+
+/*
+ * Placeholder for nest PMU events and values.
+ */
+struct imc_events {
+ u32 value;
+ char *name;
+ char *unit;
+ char *scale;
+};
+
+/*
+ * Trace IMC hardware updates a 64-byte record on a
+ * Core Performance Monitoring Counter (CPMC)
+ * overflow. Here is the layout of a trace IMC record:
+ *
+ * DW 0 : Timebase
+ * DW 1 : Program Counter
+ * DW 2 : PIDR information
+ * DW 3 : CPMC1
+ * DW 4 : CPMC2
+ * DW 5 : CPMC3
+ * DW 6 : CPMC4
+ * DW 7 : Timebase
+ * .....
+ *
+ * The following data structure holds trace IMC data.
+ */
+struct trace_imc_data {
+ u64 tb1;
+ u64 ip;
+ u64 val;
+ u64 cpmc1;
+ u64 cpmc2;
+ u64 cpmc3;
+ u64 cpmc4;
+ u64 tb2;
+};
+
+/* Event attribute array index */
+#define IMC_FORMAT_ATTR 0
+#define IMC_EVENT_ATTR 1
+#define IMC_CPUMASK_ATTR 2
+#define IMC_NULL_ATTR 3
+
+/* PMU Format attribute macros */
+#define IMC_EVENT_OFFSET_MASK 0xffffffffULL
+
+/*
+ * Macro to mask off bits 0:21 of the first double word (the timebase) so
+ * it can be compared with the 8th double word (timebase) of a trace IMC
+ * record.
+ */
+#define IMC_TRACE_RECORD_TB1_MASK 0x3ffffffffffULL
+
+/*
+ * Bits 0:1 in the third DW of an IMC trace record
+ * hold the MSR[HV] and MSR[PR] values.
+ */
+#define IMC_TRACE_RECORD_VAL_HVPR(x) ((x) >> 62)
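+
+/*
+ * Illustrative sketch (editorial example; "rec" is a hypothetical record
+ * pointer, endianness handling omitted): a parser can sanity-check a
+ * record by comparing the masked DW 0 timebase against the DW 7 timebase:
+ *
+ *	if ((rec->tb1 & IMC_TRACE_RECORD_TB1_MASK) > rec->tb2)
+ *		... record is still being written, skip it ...
+ */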
+
+/*
+ * The device tree parser detects IMC PMU support and
+ * registers new IMC PMUs. This structure holds the
+ * PMU functions, events, counter memory information
+ * and attributes for each IMC PMU, and is referenced
+ * at PMU registration time.
+ */
+struct imc_pmu {
+ struct pmu pmu;
+ struct imc_mem_info *mem_info;
+ struct imc_events *events;
+ /*
+	 * Attribute groups for the PMU. Slot 0 is used for the
+	 * format attribute, slot 1 for the event attribute,
+	 * slot 2 for the cpumask attribute, and slot 3 is kept
+	 * NULL.
+ */
+ const struct attribute_group *attr_groups[4];
+ u32 counter_mem_size;
+ int domain;
+ /*
+	 * Flag to indicate whether the memory is mmapped
+	 * or allocated by the kernel.
+ */
+ bool imc_counter_mmaped;
+};
+
+/*
+ * Structure to hold the id, lock and reference count for the IMC events
+ * which have been initialised.
+ */
+struct imc_pmu_ref {
+ spinlock_t lock;
+ unsigned int id;
+ int refc;
+};
+
+/*
+ * In-Memory Collection counter types.
+ * The data comes from the device tree.
+ * The following device types are supported.
+ */
+
+enum {
+ IMC_TYPE_THREAD = 0x1,
+ IMC_TYPE_TRACE = 0x2,
+ IMC_TYPE_CORE = 0x4,
+ IMC_TYPE_CHIP = 0x10,
+};
+
+/*
+ * Domains for IMC PMUs
+ */
+#define IMC_DOMAIN_NEST 1
+#define IMC_DOMAIN_CORE 2
+#define IMC_DOMAIN_THREAD 3
+/* For trace-imc the domain is still thread but it operates in trace-mode */
+#define IMC_DOMAIN_TRACE 4
+
+extern int init_imc_pmu(struct device_node *parent,
+ struct imc_pmu *pmu_ptr, int pmu_id);
+extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
+extern void unregister_thread_imc(void);
+#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h
new file mode 100644
index 000000000..845d5b3fb
--- /dev/null
+++ b/arch/powerpc/include/asm/immap_cpm2.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPM2 Internal Memory Map
+ * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
+ *
+ * The Internal Memory Map for devices with CPM2 on them. This
+ * is the superset of all CPM2 devices (8260, 8266, 8280, 8272,
+ * 8560).
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_CPM2__
+#define __IMMAP_CPM2__
+
+#include <linux/types.h>
+
+/* System configuration registers.
+*/
+typedef struct sys_82xx_conf {
+ u32 sc_siumcr;
+ u32 sc_sypcr;
+ u8 res1[6];
+ u16 sc_swsr;
+ u8 res2[20];
+ u32 sc_bcr;
+ u8 sc_ppc_acr;
+ u8 res3[3];
+ u32 sc_ppc_alrh;
+ u32 sc_ppc_alrl;
+ u8 sc_lcl_acr;
+ u8 res4[3];
+ u32 sc_lcl_alrh;
+ u32 sc_lcl_alrl;
+ u32 sc_tescr1;
+ u32 sc_tescr2;
+ u32 sc_ltescr1;
+ u32 sc_ltescr2;
+ u32 sc_pdtea;
+ u8 sc_pdtem;
+ u8 res5[3];
+ u32 sc_ldtea;
+ u8 sc_ldtem;
+ u8 res6[163];
+} sysconf_82xx_cpm2_t;
+
+typedef struct sys_85xx_conf {
+ u32 sc_cear;
+ u16 sc_ceer;
+ u16 sc_cemr;
+ u8 res1[70];
+ u32 sc_smaer;
+ u8 res2[4];
+ u32 sc_smevr;
+ u32 sc_smctr;
+ u32 sc_lmaer;
+ u8 res3[4];
+ u32 sc_lmevr;
+ u32 sc_lmctr;
+ u8 res4[144];
+} sysconf_85xx_cpm2_t;
+
+typedef union sys_conf {
+ sysconf_82xx_cpm2_t siu_82xx;
+ sysconf_85xx_cpm2_t siu_85xx;
+} sysconf_cpm2_t;
+
+
+
+/* Memory controller registers.
+*/
+typedef struct mem_ctlr {
+ u32 memc_br0;
+ u32 memc_or0;
+ u32 memc_br1;
+ u32 memc_or1;
+ u32 memc_br2;
+ u32 memc_or2;
+ u32 memc_br3;
+ u32 memc_or3;
+ u32 memc_br4;
+ u32 memc_or4;
+ u32 memc_br5;
+ u32 memc_or5;
+ u32 memc_br6;
+ u32 memc_or6;
+ u32 memc_br7;
+ u32 memc_or7;
+ u32 memc_br8;
+ u32 memc_or8;
+ u32 memc_br9;
+ u32 memc_or9;
+ u32 memc_br10;
+ u32 memc_or10;
+ u32 memc_br11;
+ u32 memc_or11;
+ u8 res1[8];
+ u32 memc_mar;
+ u8 res2[4];
+ u32 memc_mamr;
+ u32 memc_mbmr;
+ u32 memc_mcmr;
+ u8 res3[8];
+ u16 memc_mptpr;
+ u8 res4[2];
+ u32 memc_mdr;
+ u8 res5[4];
+ u32 memc_psdmr;
+ u32 memc_lsdmr;
+ u8 memc_purt;
+ u8 res6[3];
+ u8 memc_psrt;
+ u8 res7[3];
+ u8 memc_lurt;
+ u8 res8[3];
+ u8 memc_lsrt;
+ u8 res9[3];
+ u32 memc_immr;
+ u32 memc_pcibr0;
+ u32 memc_pcibr1;
+ u8 res10[16];
+ u32 memc_pcimsk0;
+ u32 memc_pcimsk1;
+ u8 res11[52];
+} memctl_cpm2_t;
+
+/* System Integration Timers.
+*/
+typedef struct sys_int_timers {
+ u8 res1[32];
+ u16 sit_tmcntsc;
+ u8 res2[2];
+ u32 sit_tmcnt;
+ u8 res3[4];
+ u32 sit_tmcntal;
+ u8 res4[16];
+ u16 sit_piscr;
+ u8 res5[2];
+ u32 sit_pitc;
+ u32 sit_pitr;
+ u8 res6[94];
+ u8 res7[390];
+} sit_cpm2_t;
+
+#define PISCR_PIRQ_MASK ((u16)0xff00)
+#define PISCR_PS ((u16)0x0080)
+#define PISCR_PIE ((u16)0x0004)
+#define PISCR_PTF ((u16)0x0002)
+#define PISCR_PTE ((u16)0x0001)
+
+/* PCI Controller.
+*/
+typedef struct pci_ctlr {
+ u32 pci_omisr;
+ u32 pci_omimr;
+ u8 res1[8];
+ u32 pci_ifqpr;
+ u32 pci_ofqpr;
+ u8 res2[8];
+ u32 pci_imr0;
+ u32 pci_imr1;
+ u32 pci_omr0;
+ u32 pci_omr1;
+ u32 pci_odr;
+ u8 res3[4];
+ u32 pci_idr;
+ u8 res4[20];
+ u32 pci_imisr;
+ u32 pci_imimr;
+ u8 res5[24];
+ u32 pci_ifhpr;
+ u8 res6[4];
+ u32 pci_iftpr;
+ u8 res7[4];
+ u32 pci_iphpr;
+ u8 res8[4];
+ u32 pci_iptpr;
+ u8 res9[4];
+ u32 pci_ofhpr;
+ u8 res10[4];
+ u32 pci_oftpr;
+ u8 res11[4];
+ u32 pci_ophpr;
+ u8 res12[4];
+ u32 pci_optpr;
+ u8 res13[8];
+ u32 pci_mucr;
+ u8 res14[8];
+ u32 pci_qbar;
+ u8 res15[12];
+ u32 pci_dmamr0;
+ u32 pci_dmasr0;
+ u32 pci_dmacdar0;
+ u8 res16[4];
+ u32 pci_dmasar0;
+ u8 res17[4];
+ u32 pci_dmadar0;
+ u8 res18[4];
+ u32 pci_dmabcr0;
+ u32 pci_dmandar0;
+ u8 res19[86];
+ u32 pci_dmamr1;
+ u32 pci_dmasr1;
+ u32 pci_dmacdar1;
+ u8 res20[4];
+ u32 pci_dmasar1;
+ u8 res21[4];
+ u32 pci_dmadar1;
+ u8 res22[4];
+ u32 pci_dmabcr1;
+ u32 pci_dmandar1;
+ u8 res23[88];
+ u32 pci_dmamr2;
+ u32 pci_dmasr2;
+ u32 pci_dmacdar2;
+ u8 res24[4];
+ u32 pci_dmasar2;
+ u8 res25[4];
+ u32 pci_dmadar2;
+ u8 res26[4];
+ u32 pci_dmabcr2;
+ u32 pci_dmandar2;
+ u8 res27[88];
+ u32 pci_dmamr3;
+ u32 pci_dmasr3;
+ u32 pci_dmacdar3;
+ u8 res28[4];
+ u32 pci_dmasar3;
+ u8 res29[4];
+ u32 pci_dmadar3;
+ u8 res30[4];
+ u32 pci_dmabcr3;
+ u32 pci_dmandar3;
+ u8 res31[344];
+ u32 pci_potar0;
+ u8 res32[4];
+ u32 pci_pobar0;
+ u8 res33[4];
+ u32 pci_pocmr0;
+ u8 res34[4];
+ u32 pci_potar1;
+ u8 res35[4];
+ u32 pci_pobar1;
+ u8 res36[4];
+ u32 pci_pocmr1;
+ u8 res37[4];
+ u32 pci_potar2;
+ u8 res38[4];
+ u32 pci_pobar2;
+ u8 res39[4];
+ u32 pci_pocmr2;
+ u8 res40[50];
+ u32 pci_ptcr;
+ u32 pci_gpcr;
+ u32 pci_gcr;
+ u32 pci_esr;
+ u32 pci_emr;
+ u32 pci_ecr;
+ u32 pci_eacr;
+ u8 res41[4];
+ u32 pci_edcr;
+ u8 res42[4];
+ u32 pci_eccr;
+ u8 res43[44];
+ u32 pci_pitar1;
+ u8 res44[4];
+ u32 pci_pibar1;
+ u8 res45[4];
+ u32 pci_picmr1;
+ u8 res46[4];
+ u32 pci_pitar0;
+ u8 res47[4];
+ u32 pci_pibar0;
+ u8 res48[4];
+ u32 pci_picmr0;
+ u8 res49[4];
+ u32 pci_cfg_addr;
+ u32 pci_cfg_data;
+ u32 pci_int_ack;
+ u8 res50[756];
+} pci_cpm2_t;
+
+/* Interrupt Controller.
+*/
+typedef struct interrupt_controller {
+ u16 ic_sicr;
+ u8 res1[2];
+ u32 ic_sivec;
+ u32 ic_sipnrh;
+ u32 ic_sipnrl;
+ u32 ic_siprr;
+ u32 ic_scprrh;
+ u32 ic_scprrl;
+ u32 ic_simrh;
+ u32 ic_simrl;
+ u32 ic_siexr;
+ u8 res2[88];
+} intctl_cpm2_t;
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+ u32 car_sccr;
+ u8 res1[4];
+ u32 car_scmr;
+ u8 res2[4];
+ u32 car_rsr;
+ u32 car_rmr;
+ u8 res[104];
+} car_cpm2_t;
+
+/* Input/Output Port control/status registers.
+ * Names are consistent with the processor manual, although they differ
+ * from the original 8xx names.
+ */
+typedef struct io_port {
+ u32 iop_pdira;
+ u32 iop_ppara;
+ u32 iop_psora;
+ u32 iop_podra;
+ u32 iop_pdata;
+ u8 res1[12];
+ u32 iop_pdirb;
+ u32 iop_pparb;
+ u32 iop_psorb;
+ u32 iop_podrb;
+ u32 iop_pdatb;
+ u8 res2[12];
+ u32 iop_pdirc;
+ u32 iop_pparc;
+ u32 iop_psorc;
+ u32 iop_podrc;
+ u32 iop_pdatc;
+ u8 res3[12];
+ u32 iop_pdird;
+ u32 iop_ppard;
+ u32 iop_psord;
+ u32 iop_podrd;
+ u32 iop_pdatd;
+ u8 res4[12];
+} iop_cpm2_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+ u8 cpmt_tgcr1;
+ u8 res1[3];
+ u8 cpmt_tgcr2;
+ u8 res2[11];
+ u16 cpmt_tmr1;
+ u16 cpmt_tmr2;
+ u16 cpmt_trr1;
+ u16 cpmt_trr2;
+ u16 cpmt_tcr1;
+ u16 cpmt_tcr2;
+ u16 cpmt_tcn1;
+ u16 cpmt_tcn2;
+ u16 cpmt_tmr3;
+ u16 cpmt_tmr4;
+ u16 cpmt_trr3;
+ u16 cpmt_trr4;
+ u16 cpmt_tcr3;
+ u16 cpmt_tcr4;
+ u16 cpmt_tcn3;
+ u16 cpmt_tcn4;
+ u16 cpmt_ter1;
+ u16 cpmt_ter2;
+ u16 cpmt_ter3;
+ u16 cpmt_ter4;
+ u8 res3[584];
+} cpmtimer_cpm2_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+ u8 res0[24];
+ u8 sdma_sdsr;
+ u8 res1[3];
+ u8 sdma_sdmr;
+ u8 res2[3];
+ u8 sdma_idsr1;
+ u8 res3[3];
+ u8 sdma_idmr1;
+ u8 res4[3];
+ u8 sdma_idsr2;
+ u8 res5[3];
+ u8 sdma_idmr2;
+ u8 res6[3];
+ u8 sdma_idsr3;
+ u8 res7[3];
+ u8 sdma_idmr3;
+ u8 res8[3];
+ u8 sdma_idsr4;
+ u8 res9[3];
+ u8 sdma_idmr4;
+ u8 res10[707];
+} sdma_cpm2_t;
+
+/* Fast controllers
+*/
+typedef struct fcc {
+ u32 fcc_gfmr;
+ u32 fcc_fpsmr;
+ u16 fcc_ftodr;
+ u8 res1[2];
+ u16 fcc_fdsr;
+ u8 res2[2];
+ u16 fcc_fcce;
+ u8 res3[2];
+ u16 fcc_fccm;
+ u8 res4[2];
+ u8 fcc_fccs;
+ u8 res5[3];
+ u8 fcc_ftirr_phy[4];
+} fcc_t;
+
+/* Fast controllers continued
+ */
+typedef struct fcc_c {
+ u32 fcc_firper;
+ u32 fcc_firer;
+ u32 fcc_firsr_hi;
+ u32 fcc_firsr_lo;
+ u8 fcc_gfemr;
+ u8 res1[15];
+} fcc_c_t;
+
+/* TC Layer
+ */
+typedef struct tclayer {
+ u16 tc_tcmode;
+ u16 tc_cdsmr;
+ u16 tc_tcer;
+ u16 tc_rcc;
+ u16 tc_tcmr;
+ u16 tc_fcc;
+ u16 tc_ccc;
+ u16 tc_icc;
+ u16 tc_tcc;
+ u16 tc_ecc;
+ u8 res1[12];
+} tclayer_t;
+
+
+/* I2C
+*/
+typedef struct i2c {
+ u8 i2c_i2mod;
+ u8 res1[3];
+ u8 i2c_i2add;
+ u8 res2[3];
+ u8 i2c_i2brg;
+ u8 res3[3];
+ u8 i2c_i2com;
+ u8 res4[3];
+ u8 i2c_i2cer;
+ u8 res5[3];
+ u8 i2c_i2cmr;
+ u8 res6[331];
+} i2c_cpm2_t;
+
+typedef struct scc { /* Serial communication channels */
+ u32 scc_gsmrl;
+ u32 scc_gsmrh;
+ u16 scc_psmr;
+ u8 res1[2];
+ u16 scc_todr;
+ u16 scc_dsr;
+ u16 scc_scce;
+ u8 res2[2];
+ u16 scc_sccm;
+ u8 res3;
+ u8 scc_sccs;
+ u8 res4[8];
+} scc_t;
+
+typedef struct smc { /* Serial management channels */
+ u8 res1[2];
+ u16 smc_smcmr;
+ u8 res2[2];
+ u8 smc_smce;
+ u8 res3[3];
+ u8 smc_smcm;
+ u8 res4[5];
+} smc_t;
+
+/* Serial Peripheral Interface.
+*/
+typedef struct spi_ctrl {
+ u16 spi_spmode;
+ u8 res1[4];
+ u8 spi_spie;
+ u8 res2[3];
+ u8 spi_spim;
+ u8 res3[2];
+ u8 spi_spcom;
+ u8 res4[82];
+} spictl_cpm2_t;
+
+/* CPM Mux.
+*/
+typedef struct cpmux {
+ u8 cmx_si1cr;
+ u8 res1;
+ u8 cmx_si2cr;
+ u8 res2;
+ u32 cmx_fcr;
+ u32 cmx_scr;
+ u8 cmx_smr;
+ u8 res3;
+ u16 cmx_uar;
+ u8 res4[16];
+} cpmux_t;
+
+/* SIRAM control
+*/
+typedef struct siram {
+ u16 si_amr;
+ u16 si_bmr;
+ u16 si_cmr;
+ u16 si_dmr;
+ u8 si_gmr;
+ u8 res1;
+ u8 si_cmdr;
+ u8 res2;
+ u8 si_str;
+ u8 res3;
+ u16 si_rsr;
+} siramctl_t;
+
+typedef struct mcc {
+ u16 mcc_mcce;
+ u8 res1[2];
+ u16 mcc_mccm;
+ u8 res2[2];
+ u8 mcc_mccf;
+ u8 res3[7];
+} mcc_t;
+
+typedef struct comm_proc {
+ u32 cp_cpcr;
+ u32 cp_rccr;
+ u8 res1[14];
+ u16 cp_rter;
+ u8 res2[2];
+ u16 cp_rtmr;
+ u16 cp_rtscr;
+ u8 res3[2];
+ u32 cp_rtsr;
+ u8 res4[12];
+} cpm_cpm2_t;
+
+/* USB Controller.
+*/
+typedef struct cpm_usb_ctlr {
+ u8 usb_usmod;
+ u8 usb_usadr;
+ u8 usb_uscom;
+ u8 res1[1];
+ __be16 usb_usep[4];
+ u8 res2[4];
+ __be16 usb_usber;
+ u8 res3[2];
+ __be16 usb_usbmr;
+ u8 usb_usbs;
+ u8 res4[7];
+} usb_cpm2_t;
+
+/* ...and the whole thing wrapped up....
+*/
+
+typedef struct immap {
+ /* Some references are into the unique and known dpram spaces,
+ * others are from the generic base.
+ */
+#define im_dprambase im_dpram1
+ u8 im_dpram1[16*1024];
+ u8 res1[16*1024];
+ u8 im_dpram2[4*1024];
+ u8 res2[8*1024];
+ u8 im_dpram3[4*1024];
+ u8 res3[16*1024];
+
+ sysconf_cpm2_t im_siu_conf; /* SIU Configuration */
+ memctl_cpm2_t im_memctl; /* Memory Controller */
+ sit_cpm2_t im_sit; /* System Integration Timers */
+ pci_cpm2_t im_pci; /* PCI Controller */
+ intctl_cpm2_t im_intctl; /* Interrupt Controller */
+ car_cpm2_t im_clkrst; /* Clocks and reset */
+ iop_cpm2_t im_ioport; /* IO Port control/status */
+ cpmtimer_cpm2_t im_cpmtimer; /* CPM timers */
+ sdma_cpm2_t im_sdma; /* SDMA control/status */
+
+ fcc_t im_fcc[3]; /* Three FCCs */
+ u8 res4z[32];
+ fcc_c_t im_fcc_c[3]; /* Continued FCCs */
+
+ u8 res4[32];
+
+ tclayer_t im_tclayer[8]; /* Eight TCLayers */
+ u16 tc_tcgsr;
+ u16 tc_tcger;
+
+ /* First set of baud rate generators.
+ */
+ u8 res[236];
+ u32 im_brgc5;
+ u32 im_brgc6;
+ u32 im_brgc7;
+ u32 im_brgc8;
+
+ u8 res5[608];
+
+ i2c_cpm2_t im_i2c; /* I2C control/status */
+ cpm_cpm2_t im_cpm; /* Communication processor */
+
+ /* Second set of baud rate generators.
+ */
+ u32 im_brgc1;
+ u32 im_brgc2;
+ u32 im_brgc3;
+ u32 im_brgc4;
+
+ scc_t im_scc[4]; /* Four SCCs */
+ smc_t im_smc[2]; /* Couple of SMCs */
+ spictl_cpm2_t im_spi; /* A SPI */
+ cpmux_t im_cpmux; /* CPM clock route mux */
+ siramctl_t im_siramctl1; /* First SI RAM Control */
+ mcc_t im_mcc1; /* First MCC */
+ siramctl_t im_siramctl2; /* Second SI RAM Control */
+ mcc_t im_mcc2; /* Second MCC */
+ usb_cpm2_t im_usb; /* USB Controller */
+
+ u8 res6[1153];
+
+ u16 im_si1txram[256];
+ u8 res7[512];
+ u16 im_si1rxram[256];
+ u8 res8[512];
+ u16 im_si2txram[256];
+ u8 res9[512];
+ u16 im_si2rxram[256];
+ u8 res10[512];
+ u8 res11[4096];
+} cpm2_map_t;
+
+extern cpm2_map_t __iomem *cpm2_immr;
+
+#endif /* __IMMAP_CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
new file mode 100644
index 000000000..684d3f453
--- /dev/null
+++ b/arch/powerpc/include/asm/inst.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INST_H
+#define _ASM_POWERPC_INST_H
+
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/disassemble.h>
+#include <asm/uaccess.h>
+
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
+ ppc_inst_t __gui_inst; \
+ unsigned int __prefix, __suffix; \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
+ if (__gui_ret == 0) { \
+ if (IS_ENABLED(CONFIG_PPC64) && (__prefix >> 26) == OP_PREFIX) { \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
+ } else { \
+ __gui_inst = ppc_inst(__prefix); \
+ } \
+ if (__gui_ret == 0) \
+ (dest) = __gui_inst; \
+ } \
+ __gui_ret; \
+})
+
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
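+
+/*
+ * Illustrative sketch (editorial example): fetching a possibly-prefixed
+ * instruction from the faulting user address:
+ *
+ *	ppc_inst_t instr;
+ *
+ *	if (get_user_instr(instr, (void __user *)regs->nip))
+ *		return -EFAULT;
+ */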
+
+/*
+ * Instruction data type for POWER
+ */
+
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+static inline u32 ppc_inst_val(ppc_inst_t x)
+{
+ return x.val;
+}
+
+#define ppc_inst(x) ((ppc_inst_t){ .val = (x) })
+
+#else
+static inline u32 ppc_inst_val(ppc_inst_t x)
+{
+ return x;
+}
+#define ppc_inst(x) (x)
+#endif
+
+static inline int ppc_inst_primary_opcode(ppc_inst_t x)
+{
+ return ppc_inst_val(x) >> 26;
+}
+
+#ifdef CONFIG_PPC64
+#define ppc_inst_prefix(x, y) ((ppc_inst_t){ .val = (x), .suffix = (y) })
+
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
+{
+ return x.suffix;
+}
+
+#else
+#define ppc_inst_prefix(x, y) ((void)y, ppc_inst(x))
+
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+static inline ppc_inst_t ppc_inst_read(const u32 *ptr)
+{
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
+}
+
+static inline bool ppc_inst_prefixed(ppc_inst_t x)
+{
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
+}
+
+static inline ppc_inst_t ppc_inst_swab(ppc_inst_t x)
+{
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
+}
+
+static inline bool ppc_inst_equal(ppc_inst_t x, ppc_inst_t y)
+{
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
+}
+
+static inline int ppc_inst_len(ppc_inst_t x)
+{
+ return ppc_inst_prefixed(x) ? 8 : 4;
+}
+
+/*
+ * Return the address of the next instruction, if the instruction @value was
+ * located at @location.
+ */
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
+{
+ ppc_inst_t tmp;
+
+ tmp = ppc_inst_read(value);
+
+ return (void *)location + ppc_inst_len(tmp);
+}
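+
+/*
+ * Illustrative sketch (editorial example; "start", "end" and "handle" are
+ * hypothetical): stepping through a code buffer while honouring 8-byte
+ * prefixed instructions:
+ *
+ *	for (u32 *p = start; p < end; p = ppc_inst_next(p, p))
+ *		handle(ppc_inst_read(p));
+ */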
+
+static inline unsigned long ppc_inst_as_ulong(ppc_inst_t x)
+{
+ if (IS_ENABLED(CONFIG_PPC32))
+ return ppc_inst_val(x);
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
+ else
+ return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
+}
+
+static inline void ppc_inst_write(u32 *ptr, ppc_inst_t x)
+{
+ if (!ppc_inst_prefixed(x))
+ *ptr = ppc_inst_val(x);
+ else
+ *(u64 *)ptr = ppc_inst_as_ulong(x);
+}
+
+static inline int __copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ unsigned int val, suffix;
+
+/* See https://github.com/ClangBuiltLinux/linux/issues/1521 */
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000
+ val = suffix = 0;
+#endif
+ __get_kernel_nofault(&val, src, u32, Efault);
+ if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
+ __get_kernel_nofault(&suffix, src + 1, u32, Efault);
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return 0;
+Efault:
+ return -EFAULT;
+}
+
+static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ if (unlikely(!is_kernel_addr((unsigned long)src)))
+ return -ERANGE;
+
+ return __copy_inst_from_kernel_nofault(inst, src);
+}
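+
+/*
+ * Illustrative sketch (editorial example; "addr" is a hypothetical probe
+ * address): reading an instruction from kernel text without faulting:
+ *
+ *	ppc_inst_t inst;
+ *
+ *	if (copy_inst_from_kernel_nofault(&inst, (u32 *)addr))
+ *		... not a readable kernel address ...
+ */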
+
+#endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
new file mode 100644
index 000000000..6d8492b6e
--- /dev/null
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -0,0 +1,689 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INTERRUPT_H
+#define _ASM_POWERPC_INTERRUPT_H
+
+/* BookE/4xx */
+#define INTERRUPT_CRITICAL_INPUT 0x100
+
+/* BookE */
+#define INTERRUPT_DEBUG 0xd00
+#ifdef CONFIG_BOOKE
+#define INTERRUPT_PERFMON 0x260
+#define INTERRUPT_DOORBELL 0x280
+#endif
+
+/* BookS/4xx/8xx */
+#define INTERRUPT_MACHINE_CHECK 0x200
+
+/* BookS/8xx */
+#define INTERRUPT_SYSTEM_RESET 0x100
+
+/* BookS */
+#define INTERRUPT_DATA_SEGMENT 0x380
+#define INTERRUPT_INST_SEGMENT 0x480
+#define INTERRUPT_TRACE 0xd00
+#define INTERRUPT_H_DATA_STORAGE 0xe00
+#define INTERRUPT_HMI 0xe60
+#define INTERRUPT_H_FAC_UNAVAIL 0xf80
+#ifdef CONFIG_PPC_BOOK3S
+#define INTERRUPT_DOORBELL 0xa00
+#define INTERRUPT_PERFMON 0xf00
+#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
+#endif
+
+/* BookE/BookS/4xx/8xx */
+#define INTERRUPT_DATA_STORAGE 0x300
+#define INTERRUPT_INST_STORAGE 0x400
+#define INTERRUPT_EXTERNAL 0x500
+#define INTERRUPT_ALIGNMENT 0x600
+#define INTERRUPT_PROGRAM 0x700
+#define INTERRUPT_SYSCALL 0xc00
+#define INTERRUPT_TRACE 0xd00
+
+/* BookE/BookS/44x */
+#define INTERRUPT_FP_UNAVAIL 0x800
+
+/* BookE/BookS/44x/8xx */
+#define INTERRUPT_DECREMENTER 0x900
+
+#ifndef INTERRUPT_PERFMON
+#define INTERRUPT_PERFMON 0x0
+#endif
+
+/* 8xx */
+#define INTERRUPT_SOFT_EMU_8xx 0x1000
+#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
+#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
+#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300
+#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400
+#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
+#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00
+
+/* 603 */
+#define INTERRUPT_INST_TLB_MISS_603 0x1000
+#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100
+#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
+
+#ifndef __ASSEMBLY__
+
+#include <linux/context_tracking.h>
+#include <linux/hardirq.h>
+#include <asm/cputime.h>
+#include <asm/firmware.h>
+#include <asm/ftrace.h>
+#include <asm/kprobes.h>
+#include <asm/runlatch.h>
+
+#ifdef CONFIG_PPC64
+/*
+ * WARN/BUG is handled with a program interrupt so minimise checks here to
+ * avoid recursion and maximise the chance of getting the first oops handled.
+ */
+#define INT_SOFT_MASK_BUG_ON(regs, cond) \
+do { \
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \
+ (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
+ BUG_ON(cond); \
+} while (0)
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ return 0;
+}
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
+static inline void nap_adjust_return(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_970_NAP
+ if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
+ /* Can avoid a test-and-clear because NMIs do not call this */
+ clear_thread_local_flags(_TLF_NAPPING);
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
+ }
+#endif
+}
+
+static inline void booke_restore_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+ if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
+ mtspr(SPRN_DBSR, -1);
+ mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
+ }
+#endif
+}
+
+static inline void interrupt_enter_prepare(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC32
+ if (!arch_irq_disabled_regs(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs))
+ kuap_lock();
+ else
+ kuap_save_and_lock(regs);
+
+ if (user_mode(regs))
+ account_cpu_user_entry();
+#endif
+
+#ifdef CONFIG_PPC64
+ bool trace_enable = false;
+
+ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
+ if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ trace_enable = true;
+ } else {
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ }
+
+ /*
+ * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
+ * Asynchronous interrupts get here with HARD_DIS set (see below), so
+ * this enables MSR[EE] for synchronous interrupts. IRQs remain
+ * soft-masked. The interrupt handler may later call
+ * interrupt_cond_local_irq_enable() to achieve a regular process
+ * context.
+ */
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
+ INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
+ __hard_irq_enable();
+ } else {
+ __hard_RI_enable();
+ }
+
+ /* Do this when RI=1 because it can cause SLB faults */
+ if (trace_enable)
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ kuap_lock();
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ account_cpu_user_entry();
+ account_stolen_time();
+ } else {
+ kuap_save_and_lock(regs);
+ /*
+ * CT_WARN_ON comes here via program_check_exception,
+ * so avoid recursion.
+ */
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
+ CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
+ ct_state() != CONTEXT_IDLE);
+ INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
+ INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
+ search_kernel_restart_table(regs->nip));
+ }
+ INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
+ !(regs->msr & MSR_EE));
+#endif
+
+ booke_restore_dbcr0();
+}
+
+/*
+ * Care should be taken to note that interrupt_exit_prepare and
+ * interrupt_async_exit_prepare do not necessarily return immediately to
+ * regs context (e.g., if regs is usermode, we don't necessarily return to
+ * user mode). Other interrupts might be taken between here and return,
+ * context switch / preemption may occur in the exit path after this, or a
+ * signal may be delivered, etc.
+ *
+ * The real interrupt exit code is platform specific, e.g.,
+ * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
+ *
+ * However interrupt_nmi_exit_prepare does return directly to regs, because
+ * NMIs do not do "exit work" or replay soft-masked interrupts.
+ */
+static inline void interrupt_exit_prepare(struct pt_regs *regs)
+{
+}
+
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC64
+ /* Ensure interrupt_enter_prepare does not enable MSR[EE] */
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+#endif
+ interrupt_enter_prepare(regs);
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * RI=1 is set by interrupt_enter_prepare, so this thread flags access
+ * has to come afterward (it can cause SLB faults).
+ */
+ if (cpu_has_feature(CPU_FTR_CTRL) &&
+ !test_thread_local_flags(_TLF_RUNLATCH))
+ __ppc64_runlatch_on();
+#endif
+ irq_enter();
+}
+
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
+{
+ /*
+ * Adjust at exit so the main handler sees the true NIA. This must
+ * come before irq_exit() because irq_exit can enable interrupts, and
+ * if another interrupt is taken before nap_adjust_return has run
+ * here, then that interrupt would return directly to idle nap return.
+ */
+ nap_adjust_return(regs);
+
+ irq_exit();
+ interrupt_exit_prepare(regs);
+}
+
+struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+ u8 irq_soft_mask;
+ u8 irq_happened;
+ u8 ftrace_enabled;
+ u64 softe;
+#endif
+};
+
+static inline bool nmi_disables_ftrace(struct pt_regs *regs)
+{
+ /* Allow DEC and PMI to be traced when they are soft-NMI */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (TRAP(regs) == INTERRUPT_DECREMENTER)
+ return false;
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+
+ return true;
+}
+
+static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+#ifdef CONFIG_PPC64
+ state->irq_soft_mask = local_paca->irq_soft_mask;
+ state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
+
+ /*
+ * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+	 * the right thing, and set PACA_IRQ_HARD_DIS. We do not want to
+	 * reconcile because that goes through IRQ tracing, which we don't
+	 * want in an NMI.
+ */
+ local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+ /*
+ * Adjust regs->softe to be soft-masked if it had not been
+		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+		 * not yet set to disabled), or if it was in an implicit
+		 * soft-masked state. This makes arch_irq_disabled_regs(regs)
+ * behave as expected.
+ */
+ regs->softe = IRQS_ALL_DISABLED;
+ }
+
+ __hard_RI_enable();
+
+ /* Don't do any per-CPU operations until interrupt state is fixed */
+
+ if (nmi_disables_ftrace(regs)) {
+ state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+ this_cpu_set_ftrace_enabled(0);
+ }
+#endif
+
+ /* If data relocations are enabled, it's safe to use nmi_enter() */
+ if (mfmsr() & MSR_DR) {
+ nmi_enter();
+ return;
+ }
+
+ /*
+ * But do not use nmi_enter() for pseries hash guest taking a real-mode
+ * NMI because not everything it touches is within the RMA limit.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled())
+ return;
+
+ /*
+ * Likewise, don't use it if we have some form of instrumentation (like
+ * KASAN shadow) that is not safe to access in real mode (even on radix)
+ */
+ if (IS_ENABLED(CONFIG_KASAN))
+ return;
+
+ /* Otherwise, it should be safe to call it */
+ nmi_enter();
+}
+
+static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+ if (mfmsr() & MSR_DR) {
+ // nmi_exit if relocations are on
+ nmi_exit();
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled()) {
+ // no nmi_exit for a pseries hash guest taking a real mode exception
+ } else if (IS_ENABLED(CONFIG_KASAN)) {
+ // no nmi_exit for KASAN in real mode
+ } else {
+ nmi_exit();
+ }
+
+ /*
+	 * An NMI does not call nap_adjust_return() because an NMI should not
+	 * create new work to do (it must use irq_work for that).
+ */
+
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
+ if (nmi_disables_ftrace(regs))
+ this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+
+ /* Check we didn't change the pending interrupt mask. */
+ WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
+ local_paca->irq_happened = state->irq_happened;
+ local_paca->irq_soft_mask = state->irq_soft_mask;
+#endif
+}
+
+/*
+ * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
+ * function definition. The reason for this is the noinstr section is placed
+ * after the main text section, i.e., very far away from the interrupt entry
+ * asm. That creates problems with fitting linker stubs when building large
+ * kernels.
+ */
+#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * This is a plain function which does no tracing, reconciling, etc.
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * Raw interrupt handlers must not enable or disable interrupts or
+ * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are not
+ * advisable either; they may be possible in a pinch, but the trace will
+ * look odd at the least.
+ *
+ * A raw handler may call one of the other interrupt handler functions
+ * to be converted into that interrupt context without these restrictions.
+ *
+ * On PPC64, _RAW handlers may return with fast_interrupt_return.
+ *
+ * Specific handlers may have additional restrictions.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ long ret; \
+ \
+ __hard_RI_enable(); \
+ \
+ ret = ____##func (regs); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ interrupt_enter_prepare(regs); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
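+
+/*
+ * Illustrative sketch (editorial example; "example_exception" is a
+ * hypothetical name): the macro supplies the entry/exit reconciling, so
+ * the handler body contains only the exception logic:
+ *
+ *	DEFINE_INTERRUPT_HANDLER(example_exception)
+ *	{
+ *		...
+ *	}
+ */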
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RET(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RET(func) \
+static __always_inline long ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ long ret; \
+ \
+ interrupt_enter_prepare(regs); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ interrupt_async_enter_prepare(regs); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_async_exit_prepare(regs); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ struct interrupt_nmi_state state; \
+ long ret; \
+ \
+ interrupt_nmi_enter_prepare(regs, &state); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_nmi_exit_prepare(regs, &state); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
+
+
+/* Interrupt handlers */
+/* kernel/traps.c */
+DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
+#endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER(SMIException);
+DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
+DECLARE_INTERRUPT_HANDLER(unknown_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
+DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
+DECLARE_INTERRUPT_HANDLER(RunModeException);
+DECLARE_INTERRUPT_HANDLER(single_step_exception);
+DECLARE_INTERRUPT_HANDLER(program_check_exception);
+DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
+DECLARE_INTERRUPT_HANDLER(alignment_exception);
+DECLARE_INTERRUPT_HANDLER(StackOverflow);
+DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
+DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
+DECLARE_INTERRUPT_HANDLER(DebugException);
+DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
+DECLARE_INTERRUPT_HANDLER(CacheLockingException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
+DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
+DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
+
+/* slb.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
+
+/* hash_utils.c */
+DECLARE_INTERRUPT_HANDLER(do_hash_fault);
+
+/* fault.c */
+DECLARE_INTERRUPT_HANDLER(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
+
+/* process.c */
+DECLARE_INTERRUPT_HANDLER(do_break);
+
+/* time.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
+
+/* mce.c */
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
+DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
+void __noreturn unrecoverable_exception(struct pt_regs *regs);
+
+void replay_system_reset(void);
+void replay_soft_interrupts(void);
+
+static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
+{
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+}
+
+long system_call_exception(struct pt_regs *regs, unsigned long r0);
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_INTERRUPT_H */
diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h
new file mode 100644
index 000000000..faf8617cc
--- /dev/null
+++ b/arch/powerpc/include/asm/io-defs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* This file is meant to be included multiple times by other headers */
+/* last 2 arguments are used by platforms/cell/io-workarounds.[ch] */
+
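+/*
+ * Illustrative consumer (a sketch): define the DEF_PCI_AC_* macros to
+ * what each entry should expand to, include this file, then undef them
+ * again, e.g. to emit a function pointer per accessor:
+ *
+ *	#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)  ret (*name) at;
+ *	#define DEF_PCI_AC_NORET(name, at, al, space, aa)     void (*name) at;
+ *	#include <asm/io-defs.h>
+ *	#undef DEF_PCI_AC_RET
+ *	#undef DEF_PCI_AC_NORET
+ */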
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+
+#ifdef __powerpc64__
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+#endif /* __powerpc64__ */
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port)
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),
+ (a, c, n), mem, a)
+DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n),
+ (d, s, n), mem, s)
+DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n),
+ (d, s, n), mem, d)
diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
new file mode 100644
index 000000000..3cce499fb
--- /dev/null
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Support PCI IO workaround
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ */
+
+#ifndef _IO_WORKAROUNDS_H
+#define _IO_WORKAROUNDS_H
+
+#ifdef CONFIG_PPC_IO_WORKAROUNDS
+#include <linux/io.h>
+#include <asm/pci-bridge.h>
+
+/* Bus info */
+struct iowa_bus {
+ struct pci_controller *phb;
+ struct ppc_pci_io *ops;
+ void *private;
+};
+
+void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
+ int (*)(struct iowa_bus *, void *), void *);
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
+struct iowa_bus *iowa_pio_find_bus(unsigned long);
+
+extern struct ppc_pci_io spiderpci_ops;
+extern int spiderpci_iowa_init(struct iowa_bus *, void *);
+
+#define SPIDER_PCI_REG_BASE 0xd000
+#define SPIDER_PCI_REG_SIZE 0x1000
+#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
+#define SPIDER_PCI_DUMMY_READ 0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
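+
+/*
+ * Illustrative registration (a sketch modelled on the Cell spider PCI
+ * workaround; "hose" is whatever pci_controller the platform probed):
+ *
+ *	iowa_register_bus(hose, &spiderpci_ops, &spiderpci_iowa_init,
+ *			  (void *)SPIDER_PCI_REG_BASE);
+ */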
+
+#endif
+
+#if defined(CONFIG_PPC_IO_WORKAROUNDS) && defined(CONFIG_PPC_INDIRECT_MMIO)
+extern bool io_workaround_inited;
+
+static inline bool iowa_is_active(void)
+{
+ return unlikely(io_workaround_inited);
+}
+#else
+static inline bool iowa_is_active(void)
+{
+ return false;
+}
+#endif
+
+void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
+ pgprot_t prot, void *caller);
+
+#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
new file mode 100644
index 000000000..fc112a91d
--- /dev/null
+++ b/arch/powerpc/include/asm/io.h
@@ -0,0 +1,1026 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
+#ifdef __KERNEL__
+
+#define ARCH_HAS_IOREMAP_WC
+#ifdef CONFIG_PPC32
+#define ARCH_HAS_IOREMAP_WT
+#endif
+
+/* Check for the existence of legacy devices */
+extern int check_legacy_ioport(unsigned long base_port);
+#define I8042_DATA_REG 0x60
+#define FDC_BASE 0x3f0
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
+extern struct pci_dev *isa_bridge_pcidev;
+/*
+ * Do we have legacy ISA devices?
+ */
+#define arch_has_dev_port() (isa_bridge_pcidev != NULL || isa_io_special)
+#endif
+
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/byteorder.h>
+#include <asm/synch.h>
+#include <asm/delay.h>
+#include <asm/mmiowb.h>
+#include <asm/mmu.h>
+
+#define SIO_CONFIG_RA 0x398
+#define SIO_CONFIG_RD 0x399
+
+/* 32-bit uses slightly different variables for the various IO
+ * bases. Most of this file only uses _IO_BASE, though, which we
+ * define properly based on the platform.
+ */
+#ifndef CONFIG_PCI
+#define _IO_BASE 0
+#define _ISA_MEM_BASE 0
+#define PCI_DRAM_OFFSET 0
+#elif defined(CONFIG_PPC32)
+#define _IO_BASE isa_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET pci_dram_offset
+#else
+#define _IO_BASE pci_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET 0
+#endif
+
+extern unsigned long isa_io_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+extern resource_size_t isa_mem_base;
+
+/* Boolean set by platform if PIO accesses are supported while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
+#endif
+
+/*
+ *
+ * Low level MMIO accessors
+ *
+ * This provides the non-bus specific accessors to MMIO. Those are PowerPC
+ * specific and thus shouldn't be used in generic code. The accessors
+ * provided here are:
+ *
+ * in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
+ * out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
+ * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ *
+ * Those operate directly on a kernel virtual address. Note that the prototype
+ * for the out_* accessors has the arguments in opposite order from the usual
+ * linux PCI accessors. Unlike those, they take the address first and the value
+ * next.
+ *
+ * Note: I might drop the _ns suffix on the stream operations soon, as it
+ * is simply normal for stream operations not to swap in the first place.
+ *
+ */
+
+#define DEF_MMIO_IN_X(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync" \
+ : "=r" (ret) : "Z" (*addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn" %1,%y0" \
+ : "=Z" (*addr) : "r" (val) : "memory"); \
+ mmiowb_set_pending(); \
+}
+
+#define DEF_MMIO_IN_D(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
+ : "=r" (ret) : "m<>" (*addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_D(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
+ : "=m<>" (*addr) : "r" (val) : "memory"); \
+ mmiowb_set_pending(); \
+}
+
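+/*
+ * For illustration, DEF_MMIO_IN_D(in_8, 8, lbz) below expands to roughly
+ * the following accessor (a sketch of the generated code, not extra code):
+ *
+ *	static inline u8 in_8(const volatile u8 __iomem *addr)
+ *	{
+ *		u8 ret;
+ *		__asm__ __volatile__("sync;lbz%U1%X1 %0,%1;twi 0,%0,0;isync"
+ *			: "=r" (ret) : "m<>" (*addr) : "memory");
+ *		return ret;
+ *	}
+ */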
+DEF_MMIO_IN_D(in_8, 8, lbz);
+DEF_MMIO_OUT_D(out_8, 8, stb);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
+
+#ifdef __powerpc64__
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static inline u64 in_le64(const volatile u64 __iomem *addr)
+{
+ return swab64(in_be64(addr));
+}
+
+static inline void out_le64(volatile u64 __iomem *addr, u64 val)
+{
+ out_be64(addr, swab64(val));
+}
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+ return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+ out_le64(addr, swab64(val));
+}
+
+#endif
+#endif /* __powerpc64__ */
+
+/*
+ * Low level IO stream instructions are defined out of line for now
+ */
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr, const void *buf, long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
+
+/* The _ns naming is historical and will be removed. For now, just #define
+ * the non _ns equivalent names
+ */
+#define _insw _insw_ns
+#define _insl _insl_ns
+#define _outsw _outsw_ns
+#define _outsl _outsl_ns
+
+
+/*
+ * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
+ */
+
+extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n);
+extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+ unsigned long n);
+
+/*
+ *
+ * PCI and standard ISA accessors
+ *
+ * Those are globally defined linux accessors for devices on PCI or ISA
+ * busses. They follow the Linux defined semantics. The current implementation
+ * for PowerPC is as close as possible to the x86 version of these, and thus
+ * provides fairly heavyweight barriers for the non-raw versions.
+ *
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
+ */
+
+/*
+ * Include the EEH definitions when EEH is enabled only so they don't get
+ * in the way when building for 32 bits
+ */
+#ifdef CONFIG_EEH
+#include <asm/eeh.h>
+#endif
+
+/* Shortcut to the MMIO argument pointer */
+#define PCI_IO_ADDR volatile void __iomem *
+
+/* Indirect IO address tokens:
+ *
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
+ *
+ * To help platforms that may need to differentiate MMIO addresses in
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+ * The highest addresses in the kernel virtual space are:
+ *
+ * d0003fffffffffff # with Hash MMU
+ * c00fffffffffffff # with Radix MMU
+ *
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happens when
+ * CONFIG_PPC_INDIRECT_MMIO is set, so be careful when you use this
+ * mechanism.
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
+ */
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define PCI_IO_IND_TOKEN_SHIFT 52
+#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_FIX_ADDR(addr) \
+ ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+#define PCI_GET_ADDR_TOKEN(addr) \
+ (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \
+ PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_SET_ADDR_TOKEN(addr, token) \
+do { \
+ unsigned long __a = (unsigned long)(addr); \
+ __a &= ~PCI_IO_IND_TOKEN_MASK; \
+ __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \
+ (addr) = (void __iomem *)__a; \
+} while(0)
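+
+/*
+ * Illustrative token round-trip (a sketch; the platform chooses the
+ * token value when it registers its hooks):
+ *
+ *	PCI_SET_ADDR_TOKEN(addr, 2);
+ *	BUG_ON(PCI_GET_ADDR_TOKEN(addr) != 2);
+ *	val = __raw_readb(addr);	// PCI_FIX_ADDR() strips the token
+ */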
+#else
+#define PCI_FIX_ADDR(addr) (addr)
+#endif
+
+
+/*
+ * Non ordered and non-swapping "raw" accessors
+ */
+
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
+}
+#define __raw_readb __raw_readb
+
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
+}
+#define __raw_readw __raw_readw
+
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
+}
+#define __raw_readl __raw_readl
+
+static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
+}
+#define __raw_writeb __raw_writeb
+
+static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
+}
+#define __raw_writew __raw_writew
+
+static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
+}
+#define __raw_writel __raw_writel
+
+#ifdef __powerpc64__
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
+}
+#define __raw_readq __raw_readq
+
+static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+{
+ *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
+}
+#define __raw_writeq __raw_writeq
+
+static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
+{
+ __raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
+}
+#define __raw_writeq_be __raw_writeq_be
+
+/*
+ * Real mode versions of the above. Those instructions are only supposed
+ * to be used in hypervisor real mode as per the architecture spec.
+ */
+static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stbcix %0,0,%1; \
+ .machine pop;"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ sthcix %0,0,%1; \
+ .machine pop;"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stwcix %0,0,%1; \
+ .machine pop;"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stdcix %0,0,%1; \
+ .machine pop;"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
+{
+ __raw_rm_writeq((__force u64)cpu_to_be64(val), paddr);
+}
+
+static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
+{
+ u8 ret;
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lbzcix %0,0, %1; \
+ .machine pop;"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
+{
+ u16 ret;
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lhzcix %0,0, %1; \
+ .machine pop;"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
+{
+ u32 ret;
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lwzcix %0,0, %1; \
+ .machine pop;"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
+{
+ u64 ret;
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ ldcix %0,0, %1; \
+ .machine pop;"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+#endif /* __powerpc64__ */
+
+/*
+ *
+ * PCI PIO and MMIO accessors.
+ *
+ *
+ * On 32 bits, PIO operations have a recovery mechanism in case they trigger
+ * machine checks (which they occasionally do when probing non-existent
+ * IO ports on some platforms, like PowerMac and 8xx).
+ * I always found it to be of dubious reliability and I am tempted to get
+ * rid of it one of these days. So if you think it's important to keep it,
+ * please voice up asap. We never had it for 64 bits and I do not intend
+ * to port it over.
+ */
+
+#ifdef CONFIG_PPC32
+
+#define __do_in_asm(name, op) \
+static inline unsigned int name(unsigned int port) \
+{ \
+ unsigned int x; \
+ __asm__ __volatile__( \
+ "sync\n" \
+ "0:" op " %0,0,%1\n" \
+ "1: twi 0,%0,0\n" \
+ "2: isync\n" \
+ "3: nop\n" \
+ "4:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: li %0,-1\n" \
+ " b 4b\n" \
+ ".previous\n" \
+ EX_TABLE(0b, 5b) \
+ EX_TABLE(1b, 5b) \
+ EX_TABLE(2b, 5b) \
+ EX_TABLE(3b, 5b) \
+ : "=&r" (x) \
+ : "r" (port + _IO_BASE) \
+ : "memory"); \
+ return x; \
+}
+
+#define __do_out_asm(name, op) \
+static inline void name(unsigned int val, unsigned int port) \
+{ \
+ __asm__ __volatile__( \
+ "sync\n" \
+ "0:" op " %0,0,%1\n" \
+ "1: sync\n" \
+ "2:\n" \
+ EX_TABLE(0b, 2b) \
+ EX_TABLE(1b, 2b) \
+ : : "r" (val), "r" (port + _IO_BASE) \
+ : "memory"); \
+}
+
+__do_in_asm(_rec_inb, "lbzx")
+__do_in_asm(_rec_inw, "lhbrx")
+__do_in_asm(_rec_inl, "lwbrx")
+__do_out_asm(_rec_outb, "stbx")
+__do_out_asm(_rec_outw, "sthbrx")
+__do_out_asm(_rec_outl, "stwbrx")
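+
+/*
+ * On a machine check the fixup above makes the access return -1, so a
+ * probe of a non-existent port reads back as all-ones (a sketch; note a
+ * successful byte read can never produce this value):
+ *
+ *	if (_rec_inb(port) == 0xffffffff)
+ *		// nothing answered at this port
+ */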
+
+#endif /* CONFIG_PPC32 */
+
+/* The "__do_*" operations below provide the actual "base" implementation
+ * for each of the defined accessors. Some of them use the out_* functions
+ * directly, some of them still use EEH, though we might change that in the
+ * future. Those macros below provide the necessary argument swapping and
+ * handling of the IO base for PIO.
+ *
+ * They are themselves used by the macros that define the actual accessors
+ * and can be used by the hooks if any.
+ *
+ * Note that PIO operations are always defined in terms of their corresponding
+ * MMIO operations. That allows platforms like iSeries that want to modify the
+ * behaviour of both to only hook on the MMIO version and get both. It's also
+ * possible to hook directly at the top-level PIO operation if they have to
+ * be handled differently.
+ */
+#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val)
+#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val)
+#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val)
+#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
+#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
+
+#ifdef CONFIG_EEH
+#define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr))
+#define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr))
+#define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr))
+#define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr))
+#else /* CONFIG_EEH */
+#define __do_readb(addr) in_8(PCI_FIX_ADDR(addr))
+#define __do_readw(addr) in_le16(PCI_FIX_ADDR(addr))
+#define __do_readl(addr) in_le32(PCI_FIX_ADDR(addr))
+#define __do_readq(addr) in_le64(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr) in_be16(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr) in_be32(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr) in_be64(PCI_FIX_ADDR(addr))
+#endif /* !defined(CONFIG_EEH) */
+
+#ifdef CONFIG_PPC32
+#define __do_outb(val, port) _rec_outb(val, port)
+#define __do_outw(val, port) _rec_outw(val, port)
+#define __do_outl(val, port) _rec_outl(val, port)
+#define __do_inb(port) _rec_inb(port)
+#define __do_inw(port) _rec_inw(port)
+#define __do_inl(port) _rec_inl(port)
+#else /* CONFIG_PPC32 */
+#define __do_outb(val, port)	writeb(val, (PCI_IO_ADDR)_IO_BASE + port)
+#define __do_outw(val, port)	writew(val, (PCI_IO_ADDR)_IO_BASE + port)
+#define __do_outl(val, port)	writel(val, (PCI_IO_ADDR)_IO_BASE + port)
+#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port)
+#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port)
+#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port)
+#endif /* !CONFIG_PPC32 */
+
+#ifdef CONFIG_EEH
+#define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#else /* CONFIG_EEH */
+#define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n))
+#endif /* !CONFIG_EEH */
+#define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
+
+#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+
+#define __do_memset_io(addr, c, n) \
+ _memset_io(PCI_FIX_ADDR(addr), c, n)
+#define __do_memcpy_toio(dst, src, n) \
+ _memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+
+#ifdef CONFIG_EEH
+#define __do_memcpy_fromio(dst, src, n) \
+ eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+#else /* CONFIG_EEH */
+#define __do_memcpy_fromio(dst, src, n) \
+ _memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+#endif /* !CONFIG_EEH */
+
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#define DEF_PCI_HOOK_pio(x) x
+#else
+#define DEF_PCI_HOOK_pio(x) NULL
+#endif
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define DEF_PCI_HOOK_mem(x) x
+#else
+#define DEF_PCI_HOOK_mem(x) NULL
+#endif
+
+/* Structure containing all the hooks */
+extern struct ppc_pci_io {
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) void (*name) at;
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+} ppc_pci_io;
+
+/* The inline wrappers */
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+static inline ret name at \
+{ \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
+ return ppc_pci_io.name al; \
+ return __do_##name al; \
+}
+
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+static inline void name at \
+{ \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
+ ppc_pci_io.name al; \
+ else \
+ __do_##name al; \
+}
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
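+/*
+ * Illustrative platform override (a sketch; only reachable when
+ * CONFIG_PPC_INDIRECT_MMIO is set, and the function name is made up):
+ *
+ *	static u8 example_readb(const PCI_IO_ADDR addr)
+ *	{
+ *		// platform-specific fixups around the default access
+ *		return __do_readb(addr);
+ *	}
+ *
+ * and in platform setup code: ppc_pci_io.readb = example_readb;
+ */
+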
+/* Some drivers check for the presence of readq & writeq with
+ * a #ifdef, so we make them happy here.
+ */
+#define readb readb
+#define readw readw
+#define readl readl
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define readsb readsb
+#define readsw readsw
+#define readsl readsl
+#define writesb writesb
+#define writesw writesw
+#define writesl writesl
+#define inb inb
+#define inw inw
+#define inl inl
+#define outb outb
+#define outw outw
+#define outl outl
+#define insb insb
+#define insw insw
+#define insl insl
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
+#ifdef __powerpc64__
+#define readq readq
+#define writeq writeq
+#endif
+#define memset_io memset_io
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * We don't do relaxed operations yet, at least not with these semantics.
+ */
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define readq_relaxed(addr) readq(addr)
+#define writeb_relaxed(v, addr) writeb(v, addr)
+#define writew_relaxed(v, addr) writew(v, addr)
+#define writel_relaxed(v, addr) writel(v, addr)
+#define writeq_relaxed(v, addr) writeq(v, addr)
+
+#ifdef CONFIG_GENERIC_IOMAP
+#include <asm-generic/iomap.h>
+#else
+/*
+ * Here comes the implementation of the IOMAP interfaces.
+ */
+static inline unsigned int ioread16be(const void __iomem *addr)
+{
+ return readw_be(addr);
+}
+#define ioread16be ioread16be
+
+static inline unsigned int ioread32be(const void __iomem *addr)
+{
+ return readl_be(addr);
+}
+#define ioread32be ioread32be
+
+#ifdef __powerpc64__
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ return readq(addr);
+}
+#define ioread64_lo_hi ioread64_lo_hi
+
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
+{
+ return readq(addr);
+}
+#define ioread64_hi_lo ioread64_hi_lo
+
+static inline u64 ioread64be(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be ioread64be
+
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be_lo_hi ioread64be_lo_hi
+
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be_hi_lo ioread64be_hi_lo
+#endif /* __powerpc64__ */
+
+static inline void iowrite16be(u16 val, void __iomem *addr)
+{
+ writew_be(val, addr);
+}
+#define iowrite16be iowrite16be
+
+static inline void iowrite32be(u32 val, void __iomem *addr)
+{
+ writel_be(val, addr);
+}
+#define iowrite32be iowrite32be
+
+#ifdef __powerpc64__
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#define iowrite64_lo_hi iowrite64_lo_hi
+
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#define iowrite64_hi_lo iowrite64_hi_lo
+
+static inline void iowrite64be(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be iowrite64be
+
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+#endif /* __powerpc64__ */
+
+struct pci_dev;
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+#define pci_iounmap pci_iounmap
+void __iomem *ioport_map(unsigned long port, unsigned int len);
+#define ioport_map ioport_map
+#endif
+
+static inline void iosync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ * Those are fairly weak though. They don't provide a barrier between
+ * MMIO and cacheable storage, nor do they provide a barrier vs. locks;
+ * they only provide barriers between 2 __raw MMIO operations and
+ * possibly break write combining.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r() eieio()
+#define iobarrier_w() eieio()
+
+
+/*
+ * output pause versions need a delay at least for the
+ * w83c105 ide controller in a p610.
+ */
+#define inb_p(port) inb(port)
+#define outb_p(val, port) (udelay(1), outb((val), (port)))
+#define inw_p(port) inw(port)
+#define outw_p(val, port) (udelay(1), outw((val), (port)))
+#define inl_p(port) inl(port)
+#define outl_p(val, port) (udelay(1), outl((val), (port)))
+
+
+#define IO_SPACE_LIMIT ~(0UL)
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @address: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * We provide a few variations of it:
+ *
+ * * ioremap is the standard one and provides non-cacheable guarded mappings
+ * and can be hooked by the platform via ppc_md
+ *
+ * * ioremap_prot allows specifying the page flags as an argument and can
+ * also be hooked by the platform via ppc_md.
+ *
+ * * ioremap_wc enables write combining
+ *
+ * * ioremap_wt enables write through
+ *
+ * * ioremap_coherent maps coherent cached memory
+ *
+ * * iounmap undoes such a mapping and can be hooked
+ *
+ * * __ioremap_caller is the same as above but takes an explicit caller
+ * reference rather than using __builtin_return_address(0)
+ *
+ */
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+ unsigned long flags);
+extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
+#define ioremap_wc ioremap_wc
+
+#ifdef CONFIG_PPC32
+void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
+#define ioremap_wt ioremap_wt
+#endif
+
+void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
+#define ioremap_uc(addr, size) ioremap((addr), (size))
+#define ioremap_cache(addr, size) \
+ ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+
+extern void iounmap(volatile void __iomem *addr);
+
+void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size);
+
+int early_ioremap_range(unsigned long ea, phys_addr_t pa,
+ unsigned long size, pgprot_t prot);
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+ pgprot_t prot, void *caller);
+
+extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
+ pgprot_t prot, void *caller);
+
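+/*
+ * Illustrative driver-side use (a sketch; the resource pointer and the
+ * register offset are hypothetical):
+ *
+ *	void __iomem *regs = ioremap(res->start, resource_size(res));
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(1, regs + EXAMPLE_CTRL_OFFSET);
+ *	iounmap(regs);
+ */
+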
+/*
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
+ * which needs some additional definitions here. They basically allow PIO
+ * space overall to be 1GB. This will work as long as we never try to use
+ * iomap to map MMIO below 1GB, which should be fine on ppc64.
+ */
+#define HAVE_ARCH_PIO_SIZE 1
+#define PIO_OFFSET 0x00000000UL
+#define PIO_MASK (FULL_IO_SIZE - 1)
+#define PIO_RESERVED (FULL_IO_SIZE)
+
+#define mmio_read16be(addr) readw_be(addr)
+#define mmio_read32be(addr) readl_be(addr)
+#define mmio_read64be(addr) readq_be(addr)
+#define mmio_write16be(val, addr) writew_be(val, addr)
+#define mmio_write32be(val, addr) writel_be(val, addr)
+#define mmio_write64be(val, addr) writeq_be(val, addr)
+#define mmio_insb(addr, dst, count) readsb(addr, dst, count)
+#define mmio_insw(addr, dst, count) readsw(addr, dst, count)
+#define mmio_insl(addr, dst, count) readsl(addr, dst, count)
+#define mmio_outsb(addr, src, count) writesb(addr, src, count)
+#define mmio_outsw(addr, src, count) writesw(addr, src, count)
+#define mmio_outsl(addr, src, count) writesl(addr, src, count)
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address));
+
+ return __pa((unsigned long)address);
+}
+#define virt_to_phys virt_to_phys
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *)__va(address);
+}
+#define phys_to_virt phys_to_virt
+
+/*
+ * Change "struct page" to physical address.
+ */
+static inline phys_addr_t page_to_phys(struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+
+ WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !pfn_valid(pfn));
+
+ return PFN_PHYS(pfn);
+}
+
+/*
+ * 32-bit still uses virt_to_bus() for its implementation of DMA
+ * mappings so we have to keep it defined here. We also have some old
+ * drivers (shame shame shame) that use bus_to_virt() and haven't been
+ * fixed yet, so we need to define it here.
+ */
+#ifdef CONFIG_PPC32
+
+static inline unsigned long virt_to_bus(volatile void * address)
+{
+ if (address == NULL)
+ return 0;
+ return __pa(address) + PCI_DRAM_OFFSET;
+}
+#define virt_to_bus virt_to_bus
+
+static inline void * bus_to_virt(unsigned long address)
+{
+ if (address == 0)
+ return NULL;
+ return __va(address - PCI_DRAM_OFFSET);
+}
+#define bus_to_virt bus_to_virt
+
+#endif /* CONFIG_PPC32 */
+
+/* access ports */
+#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) | (_v))
+#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))
+
+#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) | (_v))
+#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
+
+#define setbits8(_addr, _v) out_8((_addr), in_8(_addr) | (_v))
+#define clrbits8(_addr, _v) out_8((_addr), in_8(_addr) & ~(_v))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits(type, addr, clear, set) \
+ out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#ifdef __powerpc64__
+#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)
+#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)
+#endif
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
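+
+/*
+ * Worked example (a sketch; the register offset is made up): to program
+ * a 2-bit field at bits 4-5 of a big-endian 32-bit register to 0b10,
+ * pass the field mask as 'clear' and the new pattern as 'set':
+ *
+ *	clrsetbits_be32(regs + EXAMPLE_CTRL, 0x30, 0x20);
+ *
+ * which does the update in a single read-modify-write.
+ */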
+
+#include <asm-generic/io.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h
new file mode 100644
index 000000000..290c7530d
--- /dev/null
+++ b/arch/powerpc/include/asm/io_event_irq.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H
+#define _ASM_POWERPC_IO_EVENT_IRQ_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+
+#define PSERIES_IOEI_RPC_MAX_LEN 216
+
+#define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01
+#define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02
+#define PSERIES_IOEI_TYPE_EVENT 0x03
+#define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04
+
+#define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00
+#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01
+#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03
+#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04
+#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05
+#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06
+#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07
+
+#define PSERIES_IOEI_SCOPE_NOT_APP 0x00
+#define PSERIES_IOEI_SCOPE_RIO_HUB 0x36
+#define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37
+#define PSERIES_IOEI_SCOPE_PHB 0x38
+#define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39
+#define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A
+#define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B
+#define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51
+
+/* Platform Event Log Format, Version 6, data portion of IO event section */
+struct pseries_io_event {
+ uint8_t event_type; /* 0x00 IO-Event Type */
+ uint8_t rpc_data_len; /* 0x01 RPC data length */
+ uint8_t scope; /* 0x02 Error/Event Scope */
+ uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */
+ uint32_t drc_index; /* 0x04 DRC Index */
+ uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN];
+ /* 0x08 RPC Data (0-216 bytes, */
+						/* padded to 4-byte alignment) */
+};
+
+extern struct atomic_notifier_head pseries_ioei_notifier_list;
+
+#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
new file mode 100644
index 000000000..7e29c73e3
--- /dev/null
+++ b/arch/powerpc/include/asm/iommu.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ */
+
+#ifndef _ASM_IOMMU_H
+#define _ASM_IOMMU_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/bitops.h>
+#include <asm/machdep.h>
+#include <asm/types.h>
+#include <asm/pci-bridge.h>
+#include <asm/asm-const.h>
+
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
+
+/* Boot time flags */
+extern int iommu_is_off;
+extern int iommu_force_on;
+
+struct iommu_table_ops {
+ /*
+ * When called with direction==DMA_NONE, it is equal to clear().
+ * uaddr is a linear map address.
+ */
+ int (*set)(struct iommu_table *tbl,
+ long index, long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+#ifdef CONFIG_IOMMU_API
+ /*
+ * Exchanges existing TCE with new TCE plus direction bits;
+ * returns old TCE and DMA direction mask.
+ * @tce is a physical address.
+ */
+ int (*xchg_no_kill)(struct iommu_table *tbl,
+ long index,
+ unsigned long *hpa,
+ enum dma_data_direction *direction);
+
+ void (*tce_kill)(struct iommu_table *tbl,
+ unsigned long index,
+ unsigned long pages);
+
+ __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
+#endif
+ void (*clear)(struct iommu_table *tbl,
+ long index, long npages);
+ /* get() returns a physical address */
+ unsigned long (*get)(struct iommu_table *tbl, long index);
+ void (*flush)(struct iommu_table *tbl);
+ void (*free)(struct iommu_table *tbl);
+};
+
+/* These are used by VIO */
+extern struct iommu_table_ops iommu_table_lpar_multi_ops;
+extern struct iommu_table_ops iommu_table_pseries_ops;
+
+/*
+ * IOMAP_MAX_ORDER defines the largest contiguous block
+ * of dma space we can get. IOMAP_MAX_ORDER = 13
+ * allows up to 2**12 pages (4096 * 4096) = 16 MB
+ */
+#define IOMAP_MAX_ORDER 13
+
+#define IOMMU_POOL_HASHBITS 2
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+struct iommu_table {
+ unsigned long it_busno; /* Bus number this table belongs to */
+ unsigned long it_size; /* Size of iommu table in entries */
+ unsigned long it_indirect_levels;
+ unsigned long it_level_size;
+ unsigned long it_allocated_size;
+ unsigned long it_offset; /* Offset into global table */
+ unsigned long it_base; /* mapped address of tce table */
+ unsigned long it_index; /* which iommu table this is */
+ unsigned long it_type; /* type: PCI or Virtual Bus */
+ unsigned long it_blocksize; /* Entries in each block (cacheline) */
+ unsigned long poolsize;
+ unsigned long nr_pools;
+ struct iommu_pool large_pool;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+ unsigned long it_page_shift;/* table iommu page size */
+ struct list_head it_group_list;/* List of iommu_table_group_link */
+ __be64 *it_userspace; /* userspace view of the table */
+ struct iommu_table_ops *it_ops;
+ struct kref it_kref;
+ int it_nid;
+ unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
+ unsigned long it_reserved_end;
+};
+
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
+#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
+
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+ return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
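+
+/*
+ * For example, with 4K IOMMU pages (it_page_shift == 12), a size of
+ * 64KB gives __ilog2((0x10000 - 1) >> 12) + 1 = __ilog2(15) + 1 = 4,
+ * i.e. an order-4 (16 page) allocation.
+ */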
+
+
+struct scatterlist;
+
+#ifdef CONFIG_PPC64
+
+static inline void set_iommu_table_base(struct device *dev,
+ struct iommu_table *base)
+{
+ dev->archdata.iommu_table_base = base;
+}
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+ return dev->archdata.iommu_table_base;
+}
+
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
+extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
+extern int iommu_tce_table_put(struct iommu_table *tbl);
+
+/* Initializes an iommu_table based on values set in the passed-in
+ * structure.
+ */
+extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
+ int nid, unsigned long res_start, unsigned long res_end);
+bool iommu_table_in_use(struct iommu_table *tbl);
+
+#define IOMMU_TABLE_GROUP_MAX_TABLES 2
+
+struct iommu_table_group;
+
+struct iommu_table_group_ops {
+ unsigned long (*get_table_size)(
+ __u32 page_shift,
+ __u64 window_size,
+ __u32 levels);
+ long (*create_table)(struct iommu_table_group *table_group,
+ int num,
+ __u32 page_shift,
+ __u64 window_size,
+ __u32 levels,
+ struct iommu_table **ptbl);
+ long (*set_window)(struct iommu_table_group *table_group,
+ int num,
+ struct iommu_table *tblnew);
+ long (*unset_window)(struct iommu_table_group *table_group,
+ int num);
+ /* Switch ownership from platform code to external user (e.g. VFIO) */
+ void (*take_ownership)(struct iommu_table_group *table_group);
+ /* Switch ownership from external user (e.g. VFIO) back to core */
+ void (*release_ownership)(struct iommu_table_group *table_group);
+};
+
+struct iommu_table_group_link {
+ struct list_head next;
+ struct rcu_head rcu;
+ struct iommu_table_group *table_group;
+};
+
+struct iommu_table_group {
+ /* IOMMU properties */
+ __u32 tce32_start;
+ __u32 tce32_size;
+ __u64 pgsizes; /* Bitmap of supported page sizes */
+ __u32 max_dynamic_windows_supported;
+ __u32 max_levels;
+
+ struct iommu_group *group;
+ struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
+ struct iommu_table_group_ops *ops;
+};
+
+#ifdef CONFIG_IOMMU_API
+
+extern void iommu_register_group(struct iommu_table_group *table_group,
+ int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct iommu_table_group *table_group,
+ struct device *dev);
+extern void iommu_del_device(struct device *dev);
+extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction);
+extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+ struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction);
+extern void iommu_tce_kill(struct iommu_table *tbl,
+ unsigned long entry, unsigned long pages);
+#else
+static inline void iommu_register_group(struct iommu_table_group *table_group,
+ int pci_domain_number,
+ unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct iommu_table_group *table_group,
+ struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+#endif /* !CONFIG_IOMMU_API */
+
+u64 dma_iommu_get_required_mask(struct device *dev);
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+ return NULL;
+}
+
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+ struct scatterlist *sglist, int nelems,
+ unsigned long mask,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
+ struct scatterlist *sglist,
+ int nelems,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+
+extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node);
+extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+ struct page *page, unsigned long offset,
+ size_t size, unsigned long mask,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction,
+ unsigned long attrs);
+
+void __init iommu_init_early_pSeries(void);
+extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
+extern void iommu_init_early_pasemi(void);
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
+static inline void iommu_restore(void)
+{
+ if (ppc_md.iommu_restore)
+ ppc_md.iommu_restore();
+}
+#endif
+
+/* The API to support IOMMU operations for VFIO */
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+ unsigned long offset, unsigned long size,
+ unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_gpa(unsigned long page_shift,
+ unsigned long gpa);
+
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+ (iommu_tce_check_ioba((tbl)->it_page_shift, \
+ (tbl)->it_offset, (tbl)->it_size, \
+ (ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, gpa) \
+ (iommu_tce_check_ioba((tbl)->it_page_shift, \
+ (tbl)->it_offset, (tbl)->it_size, \
+ (ioba), 1) || \
+ iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
+
+extern void iommu_flush_tce(struct iommu_table *tbl);
+extern int iommu_take_ownership(struct iommu_table *tbl);
+extern void iommu_release_ownership(struct iommu_table *tbl);
+
+extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
+extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
+
+#ifdef CONFIG_PPC_CELL_NATIVE
+extern bool iommu_fixed_is_weak;
+#else
+#define iommu_fixed_is_weak false
+#endif
+
+extern const struct dma_map_ops dma_iommu_ops;
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_IOMMU_H */
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
new file mode 100644
index 000000000..b47ca7dc7
--- /dev/null
+++ b/arch/powerpc/include/asm/ipic.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * IPIC external definitions and structure.
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2005 Freescale Semiconductor, Inc
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_IPIC_H__
+#define __ASM_IPIC_H__
+
+#include <linux/irq.h>
+
+/* Flags when we init the IPIC */
+#define IPIC_SPREADMODE_GRP_A 0x00000001
+#define IPIC_SPREADMODE_GRP_B 0x00000002
+#define IPIC_SPREADMODE_GRP_C 0x00000004
+#define IPIC_SPREADMODE_GRP_D 0x00000008
+#define IPIC_SPREADMODE_MIX_A 0x00000010
+#define IPIC_SPREADMODE_MIX_B 0x00000020
+#define IPIC_DISABLE_MCP_OUT 0x00000040
+#define IPIC_IRQ0_MCP 0x00000080
+
+/* IPIC registers offsets */
+#define IPIC_SICFR 0x00 /* System Global Interrupt Configuration Register */
+#define IPIC_SIVCR 0x04 /* System Global Interrupt Vector Register */
+#define IPIC_SIPNR_H 0x08 /* System Internal Interrupt Pending Register (HIGH) */
+#define IPIC_SIPNR_L 0x0C /* System Internal Interrupt Pending Register (LOW) */
+#define IPIC_SIPRR_A 0x10 /* System Internal Interrupt group A Priority Register */
+#define IPIC_SIPRR_B 0x14 /* System Internal Interrupt group B Priority Register */
+#define IPIC_SIPRR_C 0x18 /* System Internal Interrupt group C Priority Register */
+#define IPIC_SIPRR_D 0x1C /* System Internal Interrupt group D Priority Register */
+#define IPIC_SIMSR_H 0x20 /* System Internal Interrupt Mask Register (HIGH) */
+#define IPIC_SIMSR_L 0x24 /* System Internal Interrupt Mask Register (LOW) */
+#define IPIC_SICNR 0x28 /* System Internal Interrupt Control Register */
+#define IPIC_SEPNR 0x2C /* System External Interrupt Pending Register */
+#define IPIC_SMPRR_A 0x30 /* System Mixed Interrupt group A Priority Register */
+#define IPIC_SMPRR_B 0x34 /* System Mixed Interrupt group B Priority Register */
+#define IPIC_SEMSR 0x38 /* System External Interrupt Mask Register */
+#define IPIC_SECNR 0x3C /* System External Interrupt Control Register */
+#define IPIC_SERSR 0x40 /* System Error Status Register */
+#define IPIC_SERMR 0x44 /* System Error Mask Register */
+#define IPIC_SERCR 0x48 /* System Error Control Register */
+#define IPIC_SIFCR_H 0x50 /* System Internal Interrupt Force Register (HIGH) */
+#define IPIC_SIFCR_L 0x54 /* System Internal Interrupt Force Register (LOW) */
+#define IPIC_SEFCR 0x58 /* System External Interrupt Force Register */
+#define IPIC_SERFR 0x5C /* System Error Force Register */
+#define IPIC_SCVCR 0x60 /* System Critical Interrupt Vector Register */
+#define IPIC_SMVCR 0x64 /* System Management Interrupt Vector Register */
+
+enum ipic_prio_grp {
+ IPIC_INT_GRP_A = IPIC_SIPRR_A,
+ IPIC_INT_GRP_D = IPIC_SIPRR_D,
+ IPIC_MIX_GRP_A = IPIC_SMPRR_A,
+ IPIC_MIX_GRP_B = IPIC_SMPRR_B,
+};
+
+enum ipic_mcp_irq {
+ IPIC_MCP_IRQ0 = 0,
+ IPIC_MCP_WDT = 1,
+ IPIC_MCP_SBA = 2,
+ IPIC_MCP_PCI1 = 5,
+ IPIC_MCP_PCI2 = 6,
+ IPIC_MCP_MU = 7,
+};
+
+void __init ipic_set_default_priority(void);
+extern u32 ipic_get_mcp_status(void);
+extern void ipic_clear_mcp_status(u32 mask);
+
+extern struct ipic * ipic_init(struct device_node *node, unsigned int flags);
+extern unsigned int ipic_get_irq(void);
+
+#endif /* __ASM_IPIC_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
new file mode 100644
index 000000000..5c1516a5b
--- /dev/null
+++ b/arch/powerpc/include/asm/irq.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifdef __KERNEL__
+#ifndef _ASM_POWERPC_IRQ_H
+#define _ASM_POWERPC_IRQ_H
+
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+
+#include <asm/types.h>
+#include <linux/atomic.h>
+
+
+extern atomic_t ppc_n_lost_interrupts;
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ (0)
+
+/* Total number of virq in the platform */
+#define NR_IRQS CONFIG_NR_IRQS
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NR_IRQS_LEGACY 16
+
+extern irq_hw_number_t virq_to_hw(unsigned int virq);
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
+extern int distribute_irqs;
+
+struct pt_regs;
+
+#ifdef CONFIG_BOOKE_OR_40x
+/*
+ * Per-cpu stacks for handling critical, debug and machine check
+ * level interrupts.
+ */
+extern void *critirq_ctx[NR_CPUS];
+extern void *dbgirq_ctx[NR_CPUS];
+extern void *mcheckirq_ctx[NR_CPUS];
+#endif
+
+/*
+ * Per-cpu stacks for handling hard and soft interrupts.
+ */
+extern void *hardirq_ctx[NR_CPUS];
+extern void *softirq_ctx[NR_CPUS];
+
+void __do_IRQ(struct pt_regs *regs);
+extern void __init init_IRQ(void);
+
+int irq_choose_cpu(const struct cpumask *mask);
+
+#endif /* _ASM_IRQ_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 000000000..b8b0be8f1
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return true;
+}
+extern void arch_irq_work_raise(void);
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
new file mode 100644
index 000000000..1a6c1ce17
--- /dev/null
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IRQ flags handling
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Get definitions for arch_local_save_flags(x), etc.
+ */
+#include <asm/hw_irq.h>
+
+#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ mflr r0; \
+ stdu r1, -STACK_FRAME_OVERHEAD(r1); \
+ std r0, 16(r1); \
+ stdu r1, -STACK_FRAME_OVERHEAD(r1); \
+ bl func; \
+ ld r1, 0(r1); \
+ ld r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ bl func;
+#endif
+
+/*
+ * These are calls to C code, so the caller must be prepared for volatiles to
+ * be clobbered.
+ */
+#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
+
+/*
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
+ *
+ * NB: This may call C code, so the caller must be prepared for volatiles to
+ * be clobbered.
+ */
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+ lbz __rA,PACAIRQSOFTMASK(r13); \
+ lbz __rB,PACAIRQHAPPENED(r13); \
+ andi. __rA,__rA,IRQS_DISABLED; \
+ li __rA,IRQS_DISABLED; \
+ ori __rB,__rB,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACAIRQHAPPENED(r13); \
+ bne 44f; \
+ stb __rA,PACAIRQSOFTMASK(r13); \
+ TRACE_DISABLE_INTS; \
+44:
+
+#else
+#define TRACE_ENABLE_INTS
+#define TRACE_DISABLE_INTS
+
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+ lbz __rA,PACAIRQHAPPENED(r13); \
+ li __rB,IRQS_DISABLED; \
+ ori __rA,__rA,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACAIRQSOFTMASK(r13); \
+ stb __rA,PACAIRQHAPPENED(r13)
+#endif
+#endif
+
+#endif
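
The RECONCILE_IRQ_STATE macro is easier to follow in C. Below is a rough
pseudo-C rendering of the non-tracing variant, assuming the paca fields
irq_soft_mask and irq_happened that back PACAIRQSOFTMASK and
PACAIRQHAPPENED; it sketches the semantics and is not kernel code:

/*
 * Record that hard interrupts were disabled on entry and mark interrupts
 * soft-disabled, without re-enabling anything (store order simplified).
 */
static void reconcile_irq_state(struct paca_struct *paca)
{
	paca->irq_soft_mask = IRQS_DISABLED;		/* PACAIRQSOFTMASK */
	paca->irq_happened |= PACA_IRQ_HARD_DIS;	/* PACAIRQHAPPENED */
}
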
diff --git a/arch/powerpc/include/asm/isa-bridge.h b/arch/powerpc/include/asm/isa-bridge.h
new file mode 100644
index 000000000..47295894b
--- /dev/null
+++ b/arch/powerpc/include/asm/isa-bridge.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ISA_BRIDGE_H
+#define __ISA_BRIDGE_H
+
+#ifdef CONFIG_PPC64
+
+extern void isa_bridge_find_early(struct pci_controller *hose);
+extern void isa_bridge_init_non_pci(struct device_node *np);
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* Check if address hits the reserved legacy IO range */
+ unsigned long ea = (unsigned long)address;
+ return ea >= ISA_IO_BASE && ea < ISA_IO_END;
+}
+
+#else
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* No specific ISA handling on ppc32 at this stage, it
+ * all goes through PCI
+ */
+ return 0;
+}
+
+#endif
+
+#endif /* __ISA_BRIDGE_H */
+
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 000000000..93ce3ec25
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_JUMP_LABEL_H
+#define _ASM_POWERPC_JUMP_LABEL_H
+
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
+
+#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "nop # arch_static_branch\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "b %l[l_yes] # arch_static_branch_jump\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#else
+#define ARCH_STATIC_BRANCH(LABEL, KEY) \
+1098: nop; \
+ .pushsection __jump_table, "aw"; \
+ .long 1098b - ., LABEL - .; \
+ FTR_ENTRY_LONG KEY - .; \
+ .popsection
+#endif
+
+#endif /* _ASM_POWERPC_JUMP_LABEL_H */
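
The ".long 1b - ., %l[l_yes] - ." lines emit 32-bit self-relative offsets,
so __jump_table entries need no relocations and remain valid wherever the
image is loaded. A sketch of how such entries decode back to absolute
addresses; the struct and helper names are illustrative, mirroring the
generic relative jump_entry layout:

/* One relative __jump_table entry, as laid out by the asm above. */
struct jump_entry_rel {
	s32 code;	/* branch/nop site, relative to this field */
	s32 target;	/* l_yes label, relative to this field */
	long key;	/* static_key address, relative (FTR_ENTRY_LONG) */
};

static unsigned long entry_code(const struct jump_entry_rel *e)
{
	return (unsigned long)&e->code + e->code;
}

static unsigned long entry_target(const struct jump_entry_rel *e)
{
	return (unsigned long)&e->target + e->target;
}
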
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
new file mode 100644
index 000000000..92a968202
--- /dev/null
+++ b/arch/powerpc/include/asm/kasan.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+#define _GLOBAL_KASAN(fn) _GLOBAL(__##fn)
+#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(__##fn)
+#define EXPORT_SYMBOL_KASAN(fn) EXPORT_SYMBOL(__##fn)
+#else
+#define _GLOBAL_KASAN(fn) _GLOBAL(fn)
+#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(fn)
+#define EXPORT_SYMBOL_KASAN(fn)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <linux/sizes.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
+#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
+#else
+#define KASAN_KERN_START PAGE_OFFSET
+#endif
+
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
+ (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
+
+#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+#ifdef CONFIG_PPC32
+#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
+#elif defined(CONFIG_PPC_BOOK3S_64)
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow. Instead:
+ * (c00e000000000000 >> 3) + a80e000000000000 = c00fc00000000000
+ */
+#define KASAN_SHADOW_END 0xc00fc00000000000UL
+
+#else
+
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow.
+ * But it doesn't hurt to have a shadow for the shadow, and keeping
+ * the shadow end aligned makes things easier.
+ */
+#define KASAN_SHADOW_END 0xc000200000000000UL
+
+#endif
+
+#ifdef CONFIG_KASAN
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ if (static_branch_likely(&powerpc_kasan_enabled_key))
+ return true;
+ return false;
+}
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+#endif
+
+void kasan_early_init(void);
+void kasan_mmu_init(void);
+void kasan_init(void);
+void kasan_late_init(void);
+#else
+static inline void kasan_init(void) { }
+static inline void kasan_mmu_init(void) { }
+static inline void kasan_late_init(void) { }
+#endif
+
+void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte);
+int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
+int kasan_init_region(void *start, size_t size);
+
+#endif /* __ASSEMBLY__ */
+#endif
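
Behind these constants is the usual KASAN translation: shadow(addr) =
(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET. A self-contained
check of the book3s/64 arithmetic, taking the shadow offset
0xa80e000000000000 from the comment above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SCALE_SHIFT	3
#define SHADOW_OFFSET	0xa80e000000000000ULL	/* from the comment above */

static uint64_t kasan_mem_to_shadow(uint64_t addr)
{
	return (addr >> SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	/* Reproduces the book3s/64 KASAN_SHADOW_END computation. */
	assert(kasan_mem_to_shadow(0xc00e000000000000ULL) ==
	       0xc00fc00000000000ULL);
	printf("shadow end checks out\n");
	return 0;
}
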
diff --git a/arch/powerpc/include/asm/kdebug.h b/arch/powerpc/include/asm/kdebug.h
new file mode 100644
index 000000000..0f7c1ef37
--- /dev/null
+++ b/arch/powerpc/include/asm/kdebug.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
+#ifdef __KERNEL__
+
+/* Grossly misnamed. */
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_IABR_MATCH,
+ DIE_DABR_MATCH,
+ DIE_BPT,
+ DIE_SSTEP,
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
new file mode 100644
index 000000000..fd128d1e5
--- /dev/null
+++ b/arch/powerpc/include/asm/kdump.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PPC64_KDUMP_H
+#define _PPC64_KDUMP_H
+
+#include <asm/page.h>
+
+#define KDUMP_KERNELBASE 0x2000000
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater than or equal to the trampoline's end address.
+ * Reserve to the end of the FWNMI area, see head_64.S. */
+#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
+
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. On PPC32, however, translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add PAGE_OFFSET and not worry about it.
+ */
+#ifdef __powerpc64__
+#define KDUMP_TRAMPOLINE_START 0x0100
+#define KDUMP_TRAMPOLINE_END 0x3000
+#else
+#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
+
+#define KDUMP_MIN_TCE_ENTRIES 2048
+
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
+extern void reserve_kdump_trampoline(void);
+extern void setup_kdump_trampoline(void);
+#else
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
+static inline void reserve_kdump_trampoline(void) { ; }
+static inline void setup_kdump_trampoline(void) { ; }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __PPC64_KDUMP_H */
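
The two helpers declared above split the work between early boot and later
setup. A hedged sketch of the expected call order, assuming the usual flow
(demo_kdump_early_setup is hypothetical):

/* Reserve the low window early, install the trampoline branches later. */
static void __init demo_kdump_early_setup(void)
{
	reserve_kdump_trampoline();	/* keep 0..KDUMP_RESERVE_LIMIT safe */
	/* ... memory and MMU setup ... */
	setup_kdump_trampoline();	/* patch branches into the low window */
}
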
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
new file mode 100644
index 000000000..a1ddba01e
--- /dev/null
+++ b/arch/powerpc/include/asm/kexec.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+#ifdef __KERNEL__
+
+#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x)
+
+/*
+ * On FSL-BookE we set up a 1:1 mapping which covers the first 2GiB of
+ * memory, and therefore we can only deal with memory within this range.
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+
+#else
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+#endif
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
+#ifndef __ASSEMBLY__
+#include <asm/reg.h>
+
+typedef void (*crash_shutdown_t)(void);
+
+#ifdef CONFIG_KEXEC_CORE
+
+/*
+ * This function is responsible for capturing the register state when we
+ * come in via panic or when a dump is invoked via sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ ppc_save_regs(newregs);
+}
+
+extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+ master to copy new code to 0 */
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern void crash_ipi_callback(struct pt_regs *);
+extern int crash_wake_offline;
+
+struct kimage;
+struct pt_regs;
+extern void default_machine_kexec(struct kimage *image);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+extern int crash_shutdown_register(crash_shutdown_t handler);
+extern int crash_shutdown_unregister(crash_shutdown_t handler);
+
+extern void crash_kexec_prepare(void);
+extern void crash_kexec_secondary(struct pt_regs *regs);
+int __init overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+extern void machine_kexec_mask_interrupts(void);
+
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
+}
+
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+ unsigned long start_address) __noreturn;
+
+void kexec_copy_flush(struct kimage *image);
+
+#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+#endif
+
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops kexec_elf64_ops;
+
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ struct crash_mem *exclude_ranges;
+
+ unsigned long backup_start;
+ void *backup_buf;
+ void *fdt;
+};
+
+char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
+ unsigned long cmdline_len);
+int setup_purgatory(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr);
+
+#ifdef CONFIG_PPC64
+struct kexec_buf;
+
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len);
+#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
+
+int load_crashdump_segments_ppc64(struct kimage *image,
+ struct kexec_buf *kbuf);
+int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr);
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image);
+int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline);
+#endif /* CONFIG_PPC64 */
+
+#endif /* CONFIG_KEXEC_FILE */
+
+#else /* !CONFIG_KEXEC_CORE */
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
+
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+
+static inline void reserve_crashkernel(void) { ; }
+
+static inline int crash_shutdown_register(crash_shutdown_t handler)
+{
+ return 0;
+}
+
+static inline int crash_shutdown_unregister(crash_shutdown_t handler)
+{
+ return 0;
+}
+
+static inline bool kdump_in_progress(void)
+{
+ return false;
+}
+
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kexec.h>
+#endif
+
+#ifndef reset_sprs
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+}
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
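
crash_shutdown_register() lets a driver run a quiesce hook on the crash
path before the kdump kernel takes over. A minimal sketch using only the
API declared above; the demo_* names are placeholders:

/* Hypothetical quiesce hook, run with interrupts off on the crash path. */
static void demo_crash_quiesce(void)
{
	/* stop DMA engines, mask device interrupts, etc. */
}

static int __init demo_register(void)
{
	return crash_shutdown_register(demo_crash_quiesce);
}

static void __exit demo_unregister(void)
{
	crash_shutdown_unregister(demo_crash_quiesce);
}
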
diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h
new file mode 100644
index 000000000..f83866a19
--- /dev/null
+++ b/arch/powerpc/include/asm/kexec_ranges.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_KEXEC_RANGES_H
+#define _ASM_POWERPC_KEXEC_RANGES_H
+
+#define MEM_RANGE_CHUNK_SZ 2048 /* Memory ranges size chunk */
+
+void sort_memory_ranges(struct crash_mem *mrngs, bool merge);
+struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
+int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
+int add_tce_mem_ranges(struct crash_mem **mem_ranges);
+int add_initrd_mem_range(struct crash_mem **mem_ranges);
+#ifdef CONFIG_PPC_64S_HASH_MMU
+int add_htab_mem_range(struct crash_mem **mem_ranges);
+#else
+static inline int add_htab_mem_range(struct crash_mem **mem_ranges)
+{
+ return 0;
+}
+#endif
+int add_kernel_mem_range(struct crash_mem **mem_ranges);
+int add_rtas_mem_range(struct crash_mem **mem_ranges);
+int add_opal_mem_range(struct crash_mem **mem_ranges);
+int add_reserved_mem_ranges(struct crash_mem **mem_ranges);
+
+#endif /* _ASM_POWERPC_KEXEC_RANGES_H */
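
These helpers accumulate ranges into a growable struct crash_mem. A sketch
of how a kdump loader might assemble the regions it must not overwrite; the
function name is hypothetical and error handling is abbreviated:

#include <linux/kexec.h>
#include <asm/kexec_ranges.h>

static int demo_get_exclude_ranges(struct crash_mem **ranges)
{
	int ret;

	ret = add_tce_mem_ranges(ranges);
	if (ret)
		return ret;
	ret = add_initrd_mem_range(ranges);
	if (ret)
		return ret;
	ret = add_htab_mem_range(ranges);	/* no-op without hash MMU */
	if (ret)
		return ret;
	ret = add_rtas_mem_range(ranges);
	if (ret)
		return ret;
	ret = add_opal_mem_range(ranges);
	if (ret)
		return ret;
	ret = add_reserved_mem_ranges(ranges);
	if (ret)
		return ret;

	sort_memory_ranges(*ranges, true);	/* sort and merge adjacent */
	return 0;
}
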
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
new file mode 100644
index 000000000..debdf5480
--- /dev/null
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KEYLARGO_H
+#define _ASM_POWERPC_KEYLARGO_H
+#ifdef __KERNEL__
+/*
+ * keylargo.h: definitions for using the "KeyLargo" I/O controller chip.
+ *
+ */
+
+/* "Pangea" chipset has keylargo device-id 0x25 while core99
+ * has device-id 0x22. The rev. of the pangea one is 0, so we
+ * fake an artificial rev. in keylargo_rev by oring 0x100
+ */
+#define KL_PANGEA_REV 0x100
+
+/* offset from base for feature control registers */
+#define KEYLARGO_MBCR 0x34 /* KL Only, Media bay control/status */
+#define KEYLARGO_FCR0 0x38
+#define KEYLARGO_FCR1 0x3c
+#define KEYLARGO_FCR2 0x40
+#define KEYLARGO_FCR3 0x44
+#define KEYLARGO_FCR4 0x48
+#define KEYLARGO_FCR5 0x4c /* Pangea only */
+
+/* K2 additional FCRs */
+#define K2_FCR6 0x34
+#define K2_FCR7 0x30
+#define K2_FCR8 0x2c
+#define K2_FCR9 0x28
+#define K2_FCR10 0x24
+
+/* GPIO registers */
+#define KEYLARGO_GPIO_LEVELS0 0x50
+#define KEYLARGO_GPIO_LEVELS1 0x54
+#define KEYLARGO_GPIO_EXTINT_0 0x58
+#define KEYLARGO_GPIO_EXTINT_CNT 18
+#define KEYLARGO_GPIO_0 0x6A
+#define KEYLARGO_GPIO_CNT 17
+#define KEYLARGO_GPIO_EXTINT_DUAL_EDGE 0x80
+#define KEYLARGO_GPIO_OUTPUT_ENABLE 0x04
+#define KEYLARGO_GPIO_OUTOUT_DATA 0x01
+#define KEYLARGO_GPIO_INPUT_DATA 0x02
+
+/* K2 has only extint GPIOs, and it has 51 of them */
+#define K2_GPIO_EXTINT_0 0x58
+#define K2_GPIO_EXTINT_CNT 51
+
+/* Specific GPIO regs */
+
+#define KL_GPIO_MODEM_RESET (KEYLARGO_GPIO_0+0x03)
+#define KL_GPIO_MODEM_POWER (KEYLARGO_GPIO_0+0x02) /* Pangea */
+
+#define KL_GPIO_SOUND_POWER (KEYLARGO_GPIO_0+0x05)
+
+/* Hrm... this one is only to be used on Pismo. It seems to also
+ * control the timebase enable on other machines. Still to be
+ * experimented... --BenH.
+ */
+#define KL_GPIO_FW_CABLE_POWER (KEYLARGO_GPIO_0+0x09)
+#define KL_GPIO_TB_ENABLE (KEYLARGO_GPIO_0+0x09)
+
+#define KL_GPIO_ETH_PHY_RESET (KEYLARGO_GPIO_0+0x10)
+
+#define KL_GPIO_EXTINT_CPU1 (KEYLARGO_GPIO_0+0x0a)
+#define KL_GPIO_EXTINT_CPU1_ASSERT 0x04
+#define KL_GPIO_EXTINT_CPU1_RELEASE 0x38
+
+#define KL_GPIO_RESET_CPU0 (KEYLARGO_GPIO_EXTINT_0+0x03)
+#define KL_GPIO_RESET_CPU1 (KEYLARGO_GPIO_EXTINT_0+0x04)
+#define KL_GPIO_RESET_CPU2 (KEYLARGO_GPIO_EXTINT_0+0x0f)
+#define KL_GPIO_RESET_CPU3 (KEYLARGO_GPIO_EXTINT_0+0x10)
+
+#define KL_GPIO_PMU_MESSAGE_IRQ (KEYLARGO_GPIO_EXTINT_0+0x09)
+#define KL_GPIO_PMU_MESSAGE_BIT KEYLARGO_GPIO_INPUT_DATA
+
+#define KL_GPIO_MEDIABAY_IRQ (KEYLARGO_GPIO_EXTINT_0+0x0e)
+
+#define KL_GPIO_AIRPORT_0 (KEYLARGO_GPIO_EXTINT_0+0x0a)
+#define KL_GPIO_AIRPORT_1 (KEYLARGO_GPIO_EXTINT_0+0x0d)
+#define KL_GPIO_AIRPORT_2 (KEYLARGO_GPIO_0+0x0d)
+#define KL_GPIO_AIRPORT_3 (KEYLARGO_GPIO_0+0x0e)
+#define KL_GPIO_AIRPORT_4 (KEYLARGO_GPIO_0+0x0f)
+
+/*
+ * Bits in feature control register. Those bits different for K2 are
+ * listed separately
+ */
+#define KL_MBCR_MB0_PCI_ENABLE 0x00000800 /* exist ? */
+#define KL_MBCR_MB0_IDE_ENABLE 0x00001000
+#define KL_MBCR_MB0_FLOPPY_ENABLE 0x00002000 /* exist ? */
+#define KL_MBCR_MB0_SOUND_ENABLE 0x00004000 /* hrm... */
+#define KL_MBCR_MB0_DEV_MASK 0x00007800
+#define KL_MBCR_MB0_DEV_POWER 0x00000400
+#define KL_MBCR_MB0_DEV_RESET 0x00000200
+#define KL_MBCR_MB0_ENABLE 0x00000100
+#define KL_MBCR_MB1_PCI_ENABLE 0x08000000 /* exist ? */
+#define KL_MBCR_MB1_IDE_ENABLE 0x10000000
+#define KL_MBCR_MB1_FLOPPY_ENABLE 0x20000000 /* exist ? */
+#define KL_MBCR_MB1_SOUND_ENABLE 0x40000000 /* hrm... */
+#define KL_MBCR_MB1_DEV_MASK 0x78000000
+#define KL_MBCR_MB1_DEV_POWER 0x04000000
+#define KL_MBCR_MB1_DEV_RESET 0x02000000
+#define KL_MBCR_MB1_ENABLE 0x01000000
+
+#define KL0_SCC_B_INTF_ENABLE 0x00000001 /* (KL Only) */
+#define KL0_SCC_A_INTF_ENABLE 0x00000002
+#define KL0_SCC_SLOWPCLK 0x00000004
+#define KL0_SCC_RESET 0x00000008
+#define KL0_SCCA_ENABLE 0x00000010
+#define KL0_SCCB_ENABLE 0x00000020
+#define KL0_SCC_CELL_ENABLE 0x00000040
+#define KL0_IRDA_HIGH_BAND 0x00000100 /* (KL Only) */
+#define KL0_IRDA_SOURCE2_SEL 0x00000200 /* (KL Only) */
+#define KL0_IRDA_SOURCE1_SEL 0x00000400 /* (KL Only) */
+#define KL0_PG_USB0_PMI_ENABLE 0x00000400 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_RESET 0x00000800 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND_SEL 0x00000800 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT1 0x00001000 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND 0x00001000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT0 0x00002000 /* (KL Only) */
+#define KL0_PG_USB0_PAD_SUSPEND 0x00002000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_FAST_CONNECT 0x00004000 /* (KL Only) */
+#define KL0_PG_USB1_PMI_ENABLE 0x00004000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_ENABLE 0x00008000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND_SEL 0x00008000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK32_ENABLE 0x00010000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND 0x00010000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK19_ENABLE 0x00020000 /* (KL Only) */
+#define KL0_PG_USB1_PAD_SUSPEND 0x00020000 /* (Pangea/Intrepid Only) */
+#define KL0_USB0_PAD_SUSPEND0 0x00040000
+#define KL0_USB0_PAD_SUSPEND1 0x00080000
+#define KL0_USB0_CELL_ENABLE 0x00100000
+#define KL0_USB1_PAD_SUSPEND0 0x00400000
+#define KL0_USB1_PAD_SUSPEND1 0x00800000
+#define KL0_USB1_CELL_ENABLE 0x01000000
+#define KL0_USB_REF_SUSPEND 0x10000000 /* (KL Only) */
+
+#define KL0_SERIAL_ENABLE (KL0_SCC_B_INTF_ENABLE | \
+ KL0_SCC_SLOWPCLK | \
+ KL0_SCC_CELL_ENABLE | KL0_SCCA_ENABLE)
+
+#define KL1_USB2_PMI_ENABLE 0x00000001 /* Intrepid only */
+#define KL1_AUDIO_SEL_22MCLK 0x00000002 /* KL/Pangea only */
+#define KL1_USB2_REF_SUSPEND_SEL 0x00000002 /* Intrepid only */
+#define KL1_USB2_REF_SUSPEND 0x00000004 /* Intrepid only */
+#define KL1_AUDIO_CLK_ENABLE_BIT 0x00000008 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND_SEL 0x00000008 /* Intrepid only */
+#define KL1_USB2_PAD_SUSPEND0 0x00000010 /* Intrepid only */
+#define KL1_AUDIO_CLK_OUT_ENABLE 0x00000020 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND1 0x00000020 /* Intrepid only */
+#define KL1_AUDIO_CELL_ENABLE 0x00000040 /* KL/Pangea only */
+#define KL1_USB2_CELL_ENABLE 0x00000040 /* Intrepid only */
+#define KL1_AUDIO_CHOOSE 0x00000080 /* KL/Pangea only */
+#define KL1_I2S0_CHOOSE 0x00000200 /* KL Only */
+#define KL1_I2S0_CELL_ENABLE 0x00000400
+#define KL1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define KL1_I2S0_ENABLE 0x00002000
+#define KL1_I2S1_CELL_ENABLE 0x00020000
+#define KL1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define KL1_I2S1_ENABLE 0x00100000
+#define KL1_EIDE0_ENABLE 0x00800000 /* KL/Intrepid Only */
+#define KL1_EIDE0_RESET_N 0x01000000 /* KL/Intrepid Only */
+#define KL1_EIDE1_ENABLE 0x04000000 /* KL Only */
+#define KL1_EIDE1_RESET_N 0x08000000 /* KL Only */
+#define KL1_UIDE_ENABLE 0x20000000 /* KL/Pangea Only */
+#define KL1_UIDE_RESET_N 0x40000000 /* KL/Pangea Only */
+
+#define KL2_IOBUS_ENABLE 0x00000002
+#define KL2_SLEEP_STATE_BIT 0x00000100 /* KL Only */
+#define KL2_PG_STOP_ALL_CLOCKS 0x00000100 /* Pangea Only */
+#define KL2_MPIC_ENABLE 0x00020000
+#define KL2_CARDSLOT_RESET 0x00040000 /* Pangea/Intrepid Only */
+#define KL2_ALT_DATA_OUT 0x02000000 /* KL Only ??? */
+#define KL2_MEM_IS_BIG 0x04000000
+#define KL2_CARDSEL_16 0x08000000
+
+#define KL3_SHUTDOWN_PLL_TOTAL 0x00000001 /* KL/Pangea only */
+#define KL3_SHUTDOWN_PLLKW6 0x00000002 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL3 0x00000002 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW4 0x00000004 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL2 0x00000004 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW35 0x00000008 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL1 0x00000008 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW12 0x00000010 /* KL Only */
+#define KL3_IT_ENABLE_PLL3_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_PLL_RESET 0x00000020 /* KL/Pangea only */
+#define KL3_IT_ENABLE_PLL2_SHUTDOWN 0x00000020 /* Intrepid only */
+#define KL3_IT_ENABLE_PLL1_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_SHUTDOWN_PLL2X 0x00000080 /* KL Only */
+#define KL3_CLK66_ENABLE 0x00000100 /* KL Only */
+#define KL3_CLK49_ENABLE 0x00000200
+#define KL3_CLK45_ENABLE 0x00000400
+#define KL3_CLK31_ENABLE 0x00000800 /* KL/Pangea only */
+#define KL3_TIMER_CLK18_ENABLE 0x00001000
+#define KL3_I2S1_CLK18_ENABLE 0x00002000
+#define KL3_I2S0_CLK18_ENABLE 0x00004000
+#define KL3_VIA_CLK16_ENABLE 0x00008000 /* KL/Pangea only */
+#define KL3_IT_VIA_CLK32_ENABLE 0x00008000 /* Intrepid only */
+#define KL3_STOPPING33_ENABLED 0x00080000 /* KL Only */
+#define KL3_PG_PLL_ENABLE_TEST 0x00080000 /* Pangea Only */
+
+/* Intrepid USB bus 2, port 0,1 */
+#define KL3_IT_PORT_WAKEUP_ENABLE(p) (0x00080000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_WAKE_EN(p) (0x00040000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_WAKE_EN(p) (0x00020000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_WAKE_EN(p) (0x00010000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_STAT(p) (0x00300000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_STAT(p) (0x00200000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_STAT(p) (0x00100000 << ((p)<<3))
+
+/* Port 0,1 : bus 0, port 2,3 : bus 1 */
+#define KL4_PORT_WAKEUP_ENABLE(p) (0x00000008 << ((p)<<3))
+#define KL4_PORT_RESUME_WAKE_EN(p) (0x00000004 << ((p)<<3))
+#define KL4_PORT_CONNECT_WAKE_EN(p) (0x00000002 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_WAKE_EN(p) (0x00000001 << ((p)<<3))
+#define KL4_PORT_RESUME_STAT(p) (0x00000040 << ((p)<<3))
+#define KL4_PORT_CONNECT_STAT(p) (0x00000020 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_STAT(p) (0x00000010 << ((p)<<3))
+
+/* Pangea and Intrepid only */
+#define KL5_VIA_USE_CLK31 0x00000001 /* Pangea Only */
+#define KL5_SCC_USE_CLK31 0x00000002 /* Pangea Only */
+#define KL5_PWM_CLK32_EN 0x00000004
+#define KL5_CLK3_68_EN 0x00000010
+#define KL5_CLK32_EN 0x00000020
+
+
+/* K2 definitions */
+#define K2_FCR0_USB0_SWRESET 0x00200000
+#define K2_FCR0_USB1_SWRESET 0x02000000
+#define K2_FCR0_RING_PME_DISABLE 0x08000000
+
+#define K2_FCR1_PCI1_BUS_RESET_N 0x00000010
+#define K2_FCR1_PCI1_SLEEP_RESET_EN 0x00000020
+#define K2_FCR1_I2S0_CELL_ENABLE 0x00000400
+#define K2_FCR1_I2S0_RESET 0x00000800
+#define K2_FCR1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define K2_FCR1_I2S0_ENABLE 0x00002000
+#define K2_FCR1_PCI1_CLK_ENABLE 0x00004000
+#define K2_FCR1_FW_CLK_ENABLE 0x00008000
+#define K2_FCR1_FW_RESET_N 0x00010000
+#define K2_FCR1_I2S1_CELL_ENABLE 0x00020000
+#define K2_FCR1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define K2_FCR1_I2S1_ENABLE 0x00100000
+#define K2_FCR1_GMAC_CLK_ENABLE 0x00400000
+#define K2_FCR1_GMAC_POWER_DOWN 0x00800000
+#define K2_FCR1_GMAC_RESET_N 0x01000000
+#define K2_FCR1_SATA_CLK_ENABLE 0x02000000
+#define K2_FCR1_SATA_POWER_DOWN 0x04000000
+#define K2_FCR1_SATA_RESET_N 0x08000000
+#define K2_FCR1_UATA_CLK_ENABLE 0x10000000
+#define K2_FCR1_UATA_RESET_N 0x40000000
+#define K2_FCR1_UATA_CHOOSE_CLK66 0x80000000
+
+/* Shasta definitions */
+#define SH_FCR1_I2S2_CELL_ENABLE 0x00000010
+#define SH_FCR1_I2S2_CLK_ENABLE_BIT 0x00000040
+#define SH_FCR1_I2S2_ENABLE 0x00000080
+#define SH_FCR3_I2S2_CLK18_ENABLE 0x00008000
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEYLARGO_H */
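
The FCRs are ordinary little-endian MMIO words, so drivers manipulate them
with read-modify-write sequences. A hedged sketch, assuming 'base' is the
ioremap'ed mac-io register block; real code goes through the pmac_feature
API rather than poking FCRs directly:

#include <asm/io.h>

/* Hypothetical: bring the SCC cell out of reset and enable port A. */
static void demo_enable_scc(void __iomem *base)
{
	u32 fcr = in_le32(base + KEYLARGO_FCR0);

	fcr |= KL0_SCC_CELL_ENABLE | KL0_SCCA_ENABLE;
	fcr &= ~KL0_SCC_RESET;
	out_le32(base + KEYLARGO_FCR0, fcr);
}
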
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
new file mode 100644
index 000000000..6fd2b4d48
--- /dev/null
+++ b/arch/powerpc/include/asm/kfence.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * powerpc KFENCE support.
+ *
+ * Copyright (C) 2020 CS GROUP France
+ */
+
+#ifndef __ASM_POWERPC_KFENCE_H
+#define __ASM_POWERPC_KFENCE_H
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define ARCH_FUNC_PREFIX "."
+#endif
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ struct page *page = virt_to_page(addr);
+
+ __kernel_map_pages(page, 1, !protect);
+
+ return true;
+}
+#else
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *kpte = virt_to_kpte(addr);
+
+ if (protect) {
+ pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
+ }
+
+ return true;
+}
+#endif
+
+#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
new file mode 100644
index 000000000..715c18b75
--- /dev/null
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -0,0 +1,67 @@
+/*
+ * The PowerPC (32/64) specific defines / externs for KGDB. Based on
+ * the previous 32bit and 64bit specific files, which had the following
+ * copyrights:
+ *
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
+ * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
+ * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
+ *
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Author: Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifdef __KERNEL__
+#ifndef __POWERPC_KGDB_H__
+#define __POWERPC_KGDB_H__
+
+#ifndef __ASSEMBLY__
+
+#define BREAK_INSTR_SIZE 4
+#define BUFMAX ((NUMREGBYTES * 2) + 512)
+#define OUTBUFMAX ((NUMREGBYTES * 2) + 512)
+
+#define BREAK_INSTR 0x7d821008 /* twge r2, r2 */
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(stringify_in_c(.long BREAK_INSTR));
+}
+#define CACHE_FLUSH_IS_SAFE 1
+#define DBG_MAX_REG_NUM 70
+
+/* The number of bytes of registers we have to save depends on a few
+ * things. For 64-bit we default to not including vector registers and
+ * vector state registers. */
+#ifdef CONFIG_PPC64
+/*
+ * 64 bit (8 byte) registers:
+ * 32 gpr, 32 fpr, nip, msr, link, ctr
+ * 32 bit (4 byte) registers:
+ * ccr, xer, fpscr
+ */
+#define NUMREGBYTES ((68 * 8) + (3 * 4))
+#define NUMCRITREGBYTES 184
+#else /* CONFIG_PPC32 */
+/* On non-E500 family PPC32 we determine the size by picking the last
+ * register we need, but on E500 we skip sections, so we list what we
+ * need to store and add it up. */
+#ifndef CONFIG_PPC_E500
+#define MAXREG (PT_FPSCR+1)
+#else
+/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
+#define MAXREG ((32*2)+6+2+1)
+#endif
+#define NUMREGBYTES (MAXREG * sizeof(int))
+/* CR/LR, R1, R2, R13-R31 inclusive. */
+#define NUMCRITREGBYTES (23 * sizeof(int))
+#endif /* 32/64 */
+#endif /* !(__ASSEMBLY__) */
+#endif /* !__POWERPC_KGDB_H__ */
+#endif /* __KERNEL__ */
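
The buffer sizes above are plain arithmetic over the register lists in the
comments. A standalone check of the quoted numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* 64-bit: 32 gpr + 32 fpr + nip/msr/link/ctr = 68 8-byte regs,
	 * plus ccr, xer and fpscr as 4-byte regs. */
	assert((68 * 8) + (3 * 4) == 556);

	/* E500: 32 GPRs as two ints each, plus nip, msr, ccr, link,
	 * ctr, xer, a two-int acc, and spefscr. */
	assert((32 * 2) + 6 + 2 + 1 == 73);

	printf("kgdb register buffer arithmetic checks out\n");
	return 0;
}
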
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
new file mode 100644
index 000000000..c8e4b4fd4
--- /dev/null
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef __KERNEL__
+/*
+ * Kernel Probes (KProbes)
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ * Probes initial implementation ( includes suggestions from
+ * Rusty Russell).
+ * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
+ * <ananth@in.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <asm/probes.h>
+#include <asm/code-patching.h>
+
+#ifdef CONFIG_KPROBES
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct pt_regs;
+struct kprobe;
+
+typedef u32 kprobe_opcode_t;
+
+extern kprobe_opcode_t optinsn_slot;
+
+/* Optinsn template address */
+extern kprobe_opcode_t optprobe_template_entry[];
+extern kprobe_opcode_t optprobe_template_op_address[];
+extern kprobe_opcode_t optprobe_template_call_handler[];
+extern kprobe_opcode_t optprobe_template_insn[];
+extern kprobe_opcode_t optprobe_template_call_emulate[];
+extern kprobe_opcode_t optprobe_template_ret[];
+extern kprobe_opcode_t optprobe_template_end[];
+
+/* Fixed instruction size for powerpc */
+#define MAX_INSN_SIZE 2
+#define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */
+#define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry)
+#define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+void __kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ /* copy of original instruction */
+ kprobe_opcode_t *insn;
+ /*
+ * Set in kprobes code, initially to 0. If the instruction can be
+ * emulated, this is set to 1; if not, to -1.
+ */
+ int boostable;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long saved_msr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_saved_msr;
+ struct prev_kprobe prev_kprobe;
+};
+
+struct arch_optimized_insn {
+ kprobe_opcode_t copied_insn[1];
+ /* detour buffer */
+ kprobe_opcode_t *insn;
+};
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_handler(struct pt_regs *regs);
+extern int kprobe_post_handler(struct pt_regs *regs);
+#else
+static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
+static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_KPROBES */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KPROBES_H */
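
The arch pieces above plug into the generic kprobes API. A standard
registration sketch; the probed symbol is a placeholder, not a
recommendation:

#include <linux/kprobes.h>
#include <linux/module.h>

/* Hypothetical pre-handler: runs just before the probed instruction. */
static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at nip=%lx\n", p->symbol_name, regs->nip);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* placeholder target */
	.pre_handler	= demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
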
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
new file mode 100644
index 000000000..d751ddd08
--- /dev/null
+++ b/arch/powerpc/include/asm/kup.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_H_
+#define _ASM_POWERPC_KUP_H_
+
+#define KUAP_READ 1
+#define KUAP_WRITE 2
+#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kup.h>
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#include <asm/nohash/32/kup-8xx.h>
+#endif
+
+#ifdef CONFIG_BOOKE_OR_40x
+#include <asm/nohash/kup-booke.h>
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/book3s/32/kup.h>
+#endif
+
+#ifdef __ASSEMBLY__
+#ifndef CONFIG_PPC_KUAP
+.macro kuap_check_amr gpr1, gpr2
+.endm
+
+#endif
+
+#else /* !__ASSEMBLY__ */
+
+extern bool disable_kuep;
+extern bool disable_kuap;
+
+#include <linux/pgtable.h>
+
+void setup_kup(void);
+void setup_kuep(bool disabled);
+
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled);
+#else
+static inline void setup_kuap(bool disabled) { }
+
+static __always_inline bool kuap_is_disabled(void) { return true; }
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return false;
+}
+
+static inline void __kuap_lock(void) { }
+static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
+static inline void kuap_user_restore(struct pt_regs *regs) { }
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ return 0;
+}
+
+/*
+ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+ * the L1D cache after user accesses. Only include the empty stubs for other
+ * platforms.
+ */
+#ifndef CONFIG_PPC_BOOK3S_64
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir) { }
+static inline void __prevent_user_access(unsigned long dir) { }
+static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
+static inline void __restore_user_access(unsigned long flags) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_KUAP */
+
+static __always_inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ if (kuap_is_disabled())
+ return false;
+
+ return __bad_kuap_fault(regs, address, is_write);
+}
+
+static __always_inline void kuap_assert_locked(void)
+{
+ if (kuap_is_disabled())
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ __kuap_get_and_assert_locked();
+}
+
+static __always_inline void kuap_lock(void)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_lock();
+}
+
+static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_save_and_lock(regs);
+}
+
+static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_kernel_restore(regs, amr);
+}
+
+static __always_inline unsigned long kuap_get_and_assert_locked(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __kuap_get_and_assert_locked();
+}
+
+#ifndef CONFIG_PPC_BOOK3S_64
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __allow_user_access(to, from, size, dir);
+}
+
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __prevent_user_access(dir);
+}
+
+static __always_inline unsigned long prevent_user_access_return(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __prevent_user_access_return();
+}
+
+static __always_inline void restore_user_access(unsigned long flags)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __restore_user_access(flags);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+ barrier_nospec();
+ allow_user_access(NULL, from, size, KUAP_READ);
+}
+
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
+{
+ allow_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ barrier_nospec();
+ allow_user_access(to, from, size, KUAP_READ_WRITE);
+}
+
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+ prevent_user_access(KUAP_READ);
+}
+
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+ prevent_user_access(KUAP_WRITE);
+}
+
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ prevent_user_access(KUAP_READ_WRITE);
+}
+
+static __always_inline void prevent_current_access_user(void)
+{
+ prevent_user_access(KUAP_READ_WRITE);
+}
+
+static __always_inline void prevent_current_read_from_user(void)
+{
+ prevent_user_access(KUAP_READ);
+}
+
+static __always_inline void prevent_current_write_to_user(void)
+{
+ prevent_user_access(KUAP_WRITE);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_KUP_H_ */
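
The allow/prevent pairs above bracket every user access so that KUAP can
open the window for exactly one access and close it again. An illustrative
sketch; in practice these calls live inside copy_{to,from}_user() and the
uaccess macros, and the raw dereference below stands in for __get_user():

/* Hypothetical: read one word from userspace under KUAP. */
static int demo_read_user_word(const unsigned long __user *uptr,
			       unsigned long *val)
{
	allow_read_from_user(uptr, sizeof(*uptr));
	*val = *(const unsigned long *)uptr;	/* simplified access */
	prevent_read_from_user(uptr, sizeof(*uptr));

	return 0;
}
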
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
new file mode 100644
index 000000000..d68d71987
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
+#endif
+#endif
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+
+/* E500 */
+#ifdef CONFIG_SPE_POSSIBLE
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
+#endif
+
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
+
+/* book3s */
+
+#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
+#define BOOK3S_INTERRUPT_MACHINE_CHECK 0x200
+#define BOOK3S_INTERRUPT_DATA_STORAGE 0x300
+#define BOOK3S_INTERRUPT_DATA_SEGMENT 0x380
+#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
+#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
+#define BOOK3S_INTERRUPT_EXTERNAL 0x500
+#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
+#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
+#define BOOK3S_INTERRUPT_PROGRAM 0x700
+#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
+#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER 0x1980
+#define BOOK3S_INTERRUPT_DOORBELL 0xa00
+#define BOOK3S_INTERRUPT_SYSCALL 0xc00
+#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
+#define BOOK3S_INTERRUPT_HMI 0xe60
+#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
+#define BOOK3S_INTERRUPT_H_VIRT 0xea0
+#define BOOK3S_INTERRUPT_PERFMON 0xf00
+#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
+#define BOOK3S_INTERRUPT_VSX 0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL 0xf60
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
+
+/* book3s_hv */
+
+#define BOOK3S_INTERRUPT_HV_SOFTPATCH 0x1500
+
+/*
+ * Special trap used to indicate to host that this is a
+ * passthrough interrupt that could not be handled
+ * completely in the guest.
+ */
+#define BOOK3S_INTERRUPT_HV_RM_HARD 0x5555
+
+#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
+#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
+#define BOOK3S_IRQPRIO_INST_SEGMENT 2
+#define BOOK3S_IRQPRIO_DATA_STORAGE 3
+#define BOOK3S_IRQPRIO_INST_STORAGE 4
+#define BOOK3S_IRQPRIO_ALIGNMENT 5
+#define BOOK3S_IRQPRIO_PROGRAM 6
+#define BOOK3S_IRQPRIO_FP_UNAVAIL 7
+#define BOOK3S_IRQPRIO_ALTIVEC 8
+#define BOOK3S_IRQPRIO_VSX 9
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL 10
+#define BOOK3S_IRQPRIO_SYSCALL 11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK 12
+#define BOOK3S_IRQPRIO_DEBUG 13
+#define BOOK3S_IRQPRIO_EXTERNAL 14
+#define BOOK3S_IRQPRIO_DECREMENTER 15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16
+#define BOOK3S_IRQPRIO_MAX 17
+
+#define BOOK3S_HFLAG_DCBZ32 0x1
+#define BOOK3S_HFLAG_SLB 0x2
+#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
+#define BOOK3S_HFLAG_NATIVE_PS 0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
+#define BOOK3S_HFLAG_NEW_TLBIE 0x20
+#define BOOK3S_HFLAG_SPLIT_HACK 0x40
+
+#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+#define RESUME_FLAG_ARCH1 (1<<2)
+#define RESUME_FLAG_ARCH2 (1<<3)
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_NV RESUME_FLAG_NV
+#define RESUME_HOST RESUME_FLAG_HOST
+#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#define KVM_GUEST_MODE_NONE 0
+#define KVM_GUEST_MODE_GUEST 1
+#define KVM_GUEST_MODE_SKIP 2
+#define KVM_GUEST_MODE_GUEST_HV 3
+#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
+
+#define KVM_INST_FETCH_FAILED -1
+
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
+#endif /* __POWERPC_KVM_ASM_H__ */
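
PO_XOP_OPCODE_MASK keeps the primary opcode field (bits 0-5) and the
extended opcode of X-form instructions while discarding the register
operands. A small standalone illustration of matching against it:

#include <stdbool.h>
#include <stdint.h>

#define PO_XOP_OPCODE_MASK 0xfc0007fe

/* True if 'inst' is the same PO/XOP instruction as 'pattern',
 * regardless of which registers it names. */
static bool same_po_xop(uint32_t inst, uint32_t pattern)
{
	return (inst & PO_XOP_OPCODE_MASK) == (pattern & PO_XOP_OPCODE_MASK);
}
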
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
new file mode 100644
index 000000000..bbf5e2c5f
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -0,0 +1,486 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_H__
+#define __ASM_KVM_BOOK3S_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_book3s_asm.h>
+
+struct kvmppc_bat {
+ u64 raw;
+ u32 bepi;
+ u32 bepi_mask;
+ u32 brpn;
+ u8 wimg;
+ u8 pp;
+ bool vs : 1;
+ bool vp : 1;
+};
+
+struct kvmppc_sid_map {
+ u64 guest_vsid;
+ u64 guest_esid;
+ u64 host_vsid;
+ bool valid : 1;
+};
+
+#define SID_MAP_BITS 9
+#define SID_MAP_NUM (1 << SID_MAP_BITS)
+#define SID_MAP_MASK (SID_MAP_NUM - 1)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SID_CONTEXTS 1
+#else
+#define SID_CONTEXTS 128
+#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
+#endif
+
+struct hpte_cache {
+ struct hlist_node list_pte;
+ struct hlist_node list_pte_long;
+ struct hlist_node list_vpte;
+ struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_node list_vpte_64k;
+#endif
+ struct rcu_head rcu_head;
+ u64 host_vpn;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+ int pagesize;
+};
+
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int num_threads;
+ int entry_exit_map;
+ int napping_threads;
+ int first_vcpuid;
+ u16 pcpu;
+ u16 last_cpu;
+ u8 vcore_state;
+ u8 in_guest;
+ struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
+ struct list_head preempt_list;
+ spinlock_t lock;
+ struct rcuwait wait;
+ spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+ struct kvm *kvm;
+ u64 tb_offset; /* guest timebase - host timebase */
+ u64 tb_offset_applied; /* timebase offset currently in force */
+ ulong lpcr;
+ u32 arch_compat;
+ ulong pcr;
+ ulong dpdes; /* doorbell state (POWER8) */
+ ulong vtb; /* virtual timebase */
+ ulong conferring_threads;
+ unsigned int halt_poll_ns;
+ atomic_t online_count;
+};
+
+struct kvmppc_vcpu_book3s {
+ struct kvmppc_sid_map sid_map[SID_MAP_NUM];
+ struct {
+ u64 esid;
+ u64 vsid;
+ } slb_shadow[64];
+ u8 slb_shadow_max;
+ struct kvmppc_bat ibat[8];
+ struct kvmppc_bat dbat[8];
+ u64 hid[6];
+ u64 gqr[8];
+ u64 sdr1;
+ u64 hior;
+ u64 msr_mask;
+ u64 vtb;
+#ifdef CONFIG_PPC_BOOK3S_32
+ u32 vsid_pool[VSID_POOL_SIZE];
+ u32 vsid_next;
+#else
+ u64 proto_vsid_first;
+ u64 proto_vsid_max;
+ u64 proto_vsid_next;
+#endif
+ int context_id[SID_CONTEXTS];
+
+ bool hior_explicit; /* HIOR is set by ioctl, not PVR */
+
+ struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+ struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+ struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+ struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
+ int hpte_cache_count;
+ spinlock_t mmu_lock;
+};
+
+#define VSID_REAL 0x07ffffffffc00000ULL
+#define VSID_BAT 0x07ffffffffb00000ULL
+#define VSID_64K 0x0800000000000000ULL
+#define VSID_1T 0x1000000000000000ULL
+#define VSID_REAL_DR 0x2000000000000000ULL
+#define VSID_REAL_IR 0x4000000000000000ULL
+#define VSID_PR 0x8000000000000000ULL
+
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
+extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
+extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
+extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long addr, unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+ unsigned long slb_v, unsigned long valid);
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
+ unsigned long gpa, gva_t ea, int is_store);
+
+extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
+extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern int kvmppc_mmu_hpte_sysinit(void);
+extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
+
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr);
+extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ gva_t eaddr, void *to, void *from,
+ unsigned long n);
+extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *to, unsigned long n);
+extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *from, unsigned long n);
+extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, unsigned int lpid);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift,
+ const struct kvm_memory_slot *memslot,
+ unsigned int lpid);
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
+ bool writing, unsigned long gpa,
+ unsigned int lpid);
+extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp);
+extern int kvmppc_init_vm_radix(struct kvm *kvm);
+extern void kvmppc_free_radix(struct kvm *kvm);
+extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
+ unsigned int lpid);
+extern int kvmppc_radix_init(void);
+extern void kvmppc_radix_exit(void);
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+
+/* XXX remove this export when load_last_inst() is generic */
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
+extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
+extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
+ bool upper, u32 val);
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
+extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
+ bool writing, bool *writable);
+extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+ unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
+ unsigned long gfn, unsigned long psize);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
+ unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
+ unsigned long pte_index);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+ unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
+ unsigned long gpa, bool dirty);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel,
+ pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long *hpret);
+extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
+ struct kvm_memory_slot *memslot,
+ unsigned long *map);
+extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
+ unsigned long lpcr);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+ unsigned long mask);
+extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
+
+extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
+extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
+extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_entry_trampoline(void);
+extern void kvmppc_hv_entry_trampoline(void);
+extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
+extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+long kvmppc_read_intr(void);
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
+
+long kvmhv_nested_init(void);
+void kvmhv_nested_exit(void);
+void kvmhv_vm_nested_init(struct kvm *kvm);
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
+void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_release_all_nested(struct kvm *kvm);
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
+ u64 time_limit, unsigned long lpcr);
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+extern int kvm_irq_bypass;
+
+static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.book3s;
+}
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.regs.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.regs.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.regs.ccr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.ccr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.xer = val;
+}
+
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.link = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.link;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.nip = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.nip;
+}
+
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+ return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dar;
+}
+
+/* Expiry time of vcpu DEC relative to host TB */
+static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+}
+
+static inline bool is_kvmppc_resume_guest(int r)
+{
+ return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
+}
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+ /* Only PR KVM supports the magic page */
+ return !is_kvmppc_hv_enabled(vcpu->kvm);
+}
+
+extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
+
+/* Magic register values loaded into r3 and r4 before the 'sc' assembly
+ * instruction for the OSI hypercalls */
+#define OSI_SC_MAGIC_R3 0x113724FA
+#define OSI_SC_MAGIC_R4 0x77810F9B
+
+#define INS_DCBZ 0x7c0007ec
+/* TO = 31 for unconditional trap */
+#define INS_TW 0x7fe00008
+
+#define SPLIT_HACK_MASK 0xff000000
+#define SPLIT_HACK_OFFS 0xfb000000
+
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+ const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+ int stride = kvm->arch.emul_smt_mode;
+ int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+ u32 packed_id;
+
+ if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+ return 0;
+ packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+ if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+ return 0;
+ return packed_id;
+}
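+
+/*
+ * Worked example (illustrative only; the real bounds depend on the kernel
+ * configuration). Assuming KVM_MAX_VCPUS == 1024 and an emulated SMT mode
+ * (stride) of 8:
+ *
+ *	kvmppc_pack_vcpu_id(kvm, 5)    = 5    (block 0, returned unchanged)
+ *	kvmppc_pack_vcpu_id(kvm, 1024) = 4    (block 1, offset stride/2)
+ *	kvmppc_pack_vcpu_id(kvm, 2176) = 130  (block 2, offset stride/4)
+ */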
+
+#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
new file mode 100644
index 000000000..e9d2e8463
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_32_H__
+#define __ASM_KVM_BOOK3S_32_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shadow_vcpu;
+}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
+#define SR_KP 0x20000000
+#define PTE_V 0x80000000
+#define PTE_SEC 0x00000040
+#define PTE_M 0x00000010
+#define PTE_R 0x00000100
+#define PTE_C 0x00000080
+
+#define SID_SHIFT 28
+#define ESID_MASK 0xf0000000
+#define VSID_MASK 0x00fffffff0000000ULL
+#define VPN_SHIFT 12
+
+#endif /* __ASM_KVM_BOOK3S_32_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
new file mode 100644
index 000000000..d49065af0
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -0,0 +1,682 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_64_H__
+#define __ASM_KVM_BOOK3S_64_H__
+
+#include <linux/string.h>
+#include <asm/bitops.h>
+#include <asm/book3s/64/mmu-hash.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
+
+/*
+ * Structure for a nested guest, that is, for a guest that is managed by
+ * one of our guests.
+ */
+struct kvm_nested_guest {
+ struct kvm *l1_host; /* L1 VM that owns this nested guest */
+ int l1_lpid; /* lpid L1 guest thinks this guest is */
+ int shadow_lpid; /* real lpid of this nested guest */
+ pgd_t *shadow_pgtable; /* our page table for this guest */
+ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
+ u64 process_table; /* process table entry for this guest */
+ long refcnt; /* number of pointers to this struct */
+ struct mutex tlb_lock; /* serialize page faults and tlbies */
+ struct kvm_nested_guest *next;
+ cpumask_t need_tlb_flush;
+ short prev_cpu[NR_CPUS];
+ u8 radix; /* is this nested guest radix */
+};
+
+/*
+ * We define a nested rmap entry as a single 64-bit quantity
+ * 0xFFF0000000000000 12-bit lpid field
+ * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number
+ * 0x0000000000000001 1-bit single entry flag
+ */
+#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL
+#define RMAP_NESTED_LPID_SHIFT (52)
+#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL
+#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL
+
+/* Structure for a nested guest rmap entry */
+struct rmap_nested {
+ struct llist_node list;
+ u64 rmap;
+};
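+
+/*
+ * A minimal sketch (hypothetical helper, not used by this header) of how
+ * the fields above compose into a single rmap word for a guest 4k page
+ * frame number gfn owned by lpid:
+ *
+ *	static inline u64 rmap_nested_encode(u64 lpid, u64 gfn)
+ *	{
+ *		return ((lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
+ *		       ((gfn << 12) & RMAP_NESTED_GPA_MASK);
+ *	}
+ */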
+
+/*
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
+ * safe against removal of the list entry or NULL list
+ * @pos: a (struct rmap_nested *) to use as a loop cursor
+ * @node: pointer to the first entry
+ * NOTE: this can be NULL
+ * @rmapp: an (unsigned long *) in which to return the rmap entries on each
+ * iteration
+ * NOTE: this must point to already allocated memory
+ *
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
+ * stored in the list element of the final entry of the llist. If there is ONLY
+ * a single entry then this is itself in the rmap entry of the memslot, not a
+ * llist head pointer.
+ *
+ * Note that the iterator below assumes that a nested rmap entry is always
+ * non-zero. This is true for our usage because the LPID field is always
+ * non-zero (zero is reserved for the host).
+ *
+ * This should be used to iterate over the list of rmap_nested entries with
+ * processing done on the u64 rmap value given by each iteration. This is safe
+ * against removal of list entries and it is always safe to call free on (pos).
+ *
+ * e.g.
+ * struct rmap_nested *cursor;
+ * struct llist_node *first;
+ * unsigned long rmap;
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
+ * do_something(rmap);
+ * free(cursor);
+ * }
+ */
+#define for_each_nest_rmap_safe(pos, node, rmapp) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), list); \
+ (node) && \
+ (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((u64) (node)) : ((pos)->rmap))) && \
+ (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((struct llist_node *) ((pos) = NULL)) : \
+ (pos)->list.next)), true); \
+ (pos) = llist_entry((node), typeof(*(pos)), list))
+
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+ bool create);
+void kvmhv_put_nested(struct kvm_nested_guest *gp);
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
+
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
+
+/* The Power architecture requires the HPT to be at least 256KiB and at most 64TiB */
+#define PPC_MIN_HPT_ORDER 18
+#define PPC_MAX_HPT_ORDER 46
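+/* For reference: 1UL << 18 == 256KiB and 1UL << 46 == 64TiB, matching the
+ * architectural bounds above. */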
+
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ return &get_paca()->shadow_vcpu;
+}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+ preempt_enable();
+}
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+static inline bool kvm_is_radix(struct kvm *kvm)
+{
+ return kvm->arch.radix;
+}
+
+static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
+{
+ bool radix;
+
+ if (vcpu->arch.nested)
+ radix = vcpu->arch.nested->radix;
+ else
+ radix = kvm_is_radix(vcpu->kvm);
+
+ return radix;
+}
+
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
+
+#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
+#endif
+
+/*
+ * Invalid HDSISR value, used to indicate when the hardware has not set the
+ * register. Used to work around an erratum.
+ */
+#define HDSISR_CANARY 0x7fff
+
+/*
+ * We use a lock bit in HPTE dword 0 to synchronize updates and
+ * accesses to each HPTE, and another bit to indicate non-present
+ * HPTEs.
+ */
+#define HPTE_V_HVLOCK 0x40UL
+#define HPTE_V_ABSENT 0x20UL
+
+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED (1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
+
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+ __be64 be_lockbit, be_bits;
+
+ /*
+	 * We load/store in native endianness, but the HTAB is in big endian. If
+	 * we byte swap all data we apply to the PTE we're implicitly correct
+	 * again.
+ */
+ be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+ be_bits = cpu_to_be64(bits);
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " or %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " mr %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
+ : "cc", "memory");
+ return old == 0;
+}
+
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+ hpte_v &= ~HPTE_V_HVLOCK;
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+ hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+ hpte_v &= ~HPTE_V_HVLOCK;
+ hpte[0] = cpu_to_be64(hpte_v);
+}
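+
+/*
+ * Usage sketch (illustrative; hptep is a hypothetical pointer into the
+ * HPT): spin until the HPTE is locked, modify it, then publish the update
+ * with the lock bit cleared:
+ *
+ *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ *		cpu_relax();
+ *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ *	... update the HPTE ...
+ *	unlock_hpte(hptep, v);
+ */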
+
+/*
+ * These functions encode knowledge of the POWER7/8/9 hardware
+ * interpretations of the HPTE LP (large page size) field.
+ */
+static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
+{
+ unsigned int lphi;
+
+ if (!(h & HPTE_V_LARGE))
+ return 12; /* 4kB */
+ lphi = (l >> 16) & 0xf;
+ switch ((l >> 12) & 0xf) {
+	case 0:
+		return !lphi ? 24 : 0;		/* 16MB */
+	case 1:
+		return 16;			/* 64kB */
+	case 3:
+		return !lphi ? 34 : 0;		/* 16GB */
+	case 7:
+		return (16 << 8) + 12;		/* 64kB in 4kB */
+	case 8:
+		if (!lphi)
+			return (24 << 8) + 16;	/* 16MB in 64kB */
+		if (lphi == 3)
+			return (24 << 8) + 12;	/* 16MB in 4kB */
+		break;
+ }
+ return 0;
+}
+
+static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
+{
+ return kvmppc_hpte_page_shifts(h, l) & 0xff;
+}
+
+static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
+{
+ int tmp = kvmppc_hpte_page_shifts(h, l);
+
+ if (tmp >= 0x100)
+ tmp >>= 8;
+ return tmp;
+}
+
+static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
+{
+ int shift = kvmppc_hpte_actual_page_shift(v, r);
+
+ if (shift)
+ return 1ul << shift;
+ return 0;
+}
+
+static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
+{
+ switch (base_shift) {
+ case 12:
+ switch (actual_shift) {
+ case 12:
+ return 0;
+ case 16:
+ return 7;
+ case 24:
+ return 0x38;
+ }
+ break;
+ case 16:
+ switch (actual_shift) {
+ case 16:
+ return 1;
+ case 24:
+ return 8;
+ }
+ break;
+ case 24:
+ return 0;
+ }
+ return -1;
+}
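+
+/*
+ * Worked example: a 16MB actual page with a 4kB base page size encodes as
+ * LP = 0x38 (kvmppc_pgsize_lp_encoding(12, 24)); decoding the same HPTE
+ * with kvmppc_hpte_page_shifts() yields (24 << 8) + 12, i.e. a base shift
+ * of 12 and an actual shift of 24.
+ */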
+
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ int a_pgshift, b_pgshift;
+ unsigned long rb = 0, va_low, sllp;
+
+ b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
+ if (a_pgshift >= 0x100) {
+ b_pgshift &= 0xff;
+ a_pgshift >>= 8;
+ }
+
+	/*
+	 * Ignore the top 14 bits of the va. v has its top two bits covering
+	 * the segment size, hence shift by 16 bits and also clear the lower
+	 * HPTE_V_AVPN_SHIFT (7) bits. The AVA field in v also has its lower
+	 * 23 bits ignored.
+	 * For a 4K base page size we need bits 14..65 (so we must collect 11
+	 * extra bits); for others we need bits 14..14+i.
+	 */
+	/* This covers bits 14..54 of the va */
+	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+
+	/*
+	 * The AVA in v has its lower 23 bits cleared; we need to derive
+	 * them from the pteg index.
+	 */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+	/*
+	 * Get the vpn bits from va_low by reversing the hash. In v we have
+	 * the va with its low 23 bits dropped and then shifted left by
+	 * HPTE_V_AVPN_SHIFT (7) bits, so to recover the vsid we shift v
+	 * right by (SID_SHIFT - (23 - 7)).
+	 */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> (SID_SHIFT - 16);
+ else
+ va_low ^= v >> (SID_SHIFT_1T - 16);
+ va_low &= 0x7ff;
+
+ if (b_pgshift <= 12) {
+ if (a_pgshift > 12) {
+ sllp = (a_pgshift == 16) ? 5 : 4;
+ rb |= sllp << 5; /* AP field */
+ }
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */
+ } else {
+ int aval_shift;
+		/*
+		 * Remaining bits of the AVA/LP fields; these also contain
+		 * the rr bits of LP.
+		 */
+		rb |= (va_low << b_pgshift) & 0x7ff000;
+		/*
+		 * Now clear the LP bits that are not needed for the
+		 * actual page size.
+		 */
+		rb &= ~((1ul << a_pgshift) - 1);
+		/*
+		 * The AVAL field holds bits 58..(77 - base_page_shift) of
+		 * the va; we only have space for bits 58..64, and missing
+		 * bits should be zero filled. The +1 takes care of the L
+		 * bit shift.
+		 */
+ aval_shift = 64 - (77 - b_pgshift) + 1;
+ rb |= ((va_low << aval_shift) & 0xfe);
+
+ rb |= 1; /* L field */
+ rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
+ }
+ /*
+ * This sets both bits of the B field in the PTE. 0b1x values are
+ * reserved, but those will have been filtered by kvmppc_do_h_enter.
+ */
+ rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
+ return rb;
+}
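+
+/*
+ * Usage sketch (illustrative; the two-operand tlbie form is shown for
+ * brevity): the result is the RB operand of the tlbie that invalidates
+ * the translation described by the HPTE:
+ *
+ *	rb = compute_tlbie_rb(v, r, pte_index);
+ *	asm volatile(PPC_TLBIE(%1, %0) : : "r" (rb), "r" (lpid));
+ */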
+
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+ return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
+static inline int hpte_is_writable(unsigned long ptel)
+{
+ unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
+
+ return pp != PP_RXRX && pp != PP_RXXX;
+}
+
+static inline unsigned long hpte_make_readonly(unsigned long ptel)
+{
+ if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
+ ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
+ else
+ ptel |= PP_RXRX;
+ return ptel;
+}
+
+static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
+{
+ unsigned int wimg = hptel & HPTE_R_WIMG;
+
+ /* Handle SAO */
+ if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ wimg = HPTE_R_M;
+
+ if (!is_ci)
+ return wimg == HPTE_R_M;
+	/*
+	 * If the host mapping is cache inhibited, make sure hptel is
+	 * cache inhibited too.
+	 */
+	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
+ return false;
+ return !!(wimg & HPTE_R_I);
+}
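+
+/*
+ * Examples of the checks above: a plain cacheable mapping (WIMG == M)
+ * passes when !is_ci; a cache-inhibited mapping (I set, W clear) passes
+ * when is_ci; and SAO (W | I | M) is accepted as plain M on ISA 2.06+
+ * CPUs.
+ */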
+
+/*
+ * If it's present and writable, atomically set dirty and referenced bits and
+ * return the PTE, otherwise return 0.
+ */
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
+{
+ pte_t old_pte, new_pte = __pte(0);
+
+ while (1) {
+ /*
+ * Make sure we don't reload from ptep
+ */
+ old_pte = READ_ONCE(*ptep);
+ /*
+ * wait until H_PAGE_BUSY is clear then set it atomically
+ */
+ if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
+ cpu_relax();
+ continue;
+ }
+		/* If the pte is not present, return an empty pte */
+ if (unlikely(!pte_present(old_pte)))
+ return __pte(0);
+
+ new_pte = pte_mkyoung(old_pte);
+ if (writing && pte_write(old_pte))
+ new_pte = pte_mkdirty(new_pte);
+
+ if (pte_xchg(ptep, old_pte, new_pte))
+ break;
+ }
+ return new_pte;
+}
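+
+/*
+ * Usage sketch (illustrative; the H_TOO_HARD fallback is an assumption
+ * about the caller): after locating the Linux PTE for a faulting address,
+ * atomically mark it young (and dirty when writing) before using the
+ * translation:
+ *
+ *	pte = kvmppc_read_update_linux_pte(ptep, writing);
+ *	if (!pte_present(pte))
+ *		return H_TOO_HARD;
+ */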
+
+static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return PP_RWRX <= pp && pp <= PP_RXRX;
+ return true;
+}
+
+static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return pp == PP_RWRW;
+ return pp <= PP_RWRW;
+}
+
+static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
+{
+ unsigned long skey;
+
+ skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
+ ((hpte_r & HPTE_R_KEY_LO) >> 9);
+ return (amr >> (62 - 2 * skey)) & 3;
+}
+
+static inline void lock_rmap(unsigned long *rmap)
+{
+ do {
+ while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+ cpu_relax();
+ } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+ __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
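+
+/*
+ * Usage sketch (illustrative): the rmap word doubles as a lock and a data
+ * word, so take the lock bit before inspecting the other fields (the
+ * KVMPPC_RMAP_* masks are defined in kvm_host.h):
+ *
+ *	lock_rmap(rmap);
+ *	if (*rmap & KVMPPC_RMAP_PRESENT)
+ *		... follow *rmap & KVMPPC_RMAP_INDEX into the HPT ...
+ *	unlock_rmap(rmap);
+ */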
+
+static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
+ unsigned long pagesize)
+{
+ unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
+
+ if (pagesize <= PAGE_SIZE)
+ return true;
+ return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
+}
+
+/*
+ * This works for 4k, 64k and 16M pages on POWER7,
+ * and 4k and 16M pages on PPC970.
+ */
+static inline unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+ unsigned long senc = 0;
+
+ if (psize > 0x1000) {
+ senc = SLB_VSID_L;
+ if (psize == 0x10000)
+ senc |= SLB_VSID_LP_01;
+ }
+ return senc;
+}
+
+static inline int is_vrma_hpte(unsigned long hpte_v)
+{
+ return (hpte_v & ~0xffffffUL) ==
+ (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+ struct revmap_entry *rev)
+{
+ if (atomic_read(&kvm->arch.hpte_mod_interest))
+ rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
+/*
+ * Like kvm_memslots(), but for use in real mode when we can't do
+ * any RCU stuff (since the secondary threads are offline from the
+ * kernel's point of view), and we can't print anything.
+ * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
+ */
+static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
+{
+ return rcu_dereference_raw_check(kvm->memslots[0]);
+}
+
+extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
+
+extern void kvmhv_rm_send_ipi(int cpu);
+
+static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
+{
+ /* HPTEs are 2**4 bytes long */
+ return 1UL << (hpt->order - 4);
+}
+
+static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
+{
+ /* 128 (2**7) bytes in each HPTEG */
+ return (1UL << (hpt->order - 7)) - 1;
+}
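+
+/*
+ * Worked example: with the default order of 24 (a 16MB HPT), the table
+ * holds 1UL << (24 - 4) == 1M HPTEs in 1UL << (24 - 7) == 128K groups, so
+ * kvmppc_hpt_mask() returns 0x1ffff.
+ */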
+
+/* Set bits in a dirty bitmap, which is in LE format */
+static inline void set_dirty_bits(unsigned long *map, unsigned long i,
+ unsigned long npages)
+{
+ if (npages >= 8)
+ memset((char *)map + i / 8, 0xff, npages / 8);
+ else
+ for (; npages; ++i, --npages)
+ __set_bit_le(i, map);
+}
+
+static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
+ unsigned long npages)
+{
+ if (npages >= 8)
+ memset((char *)map + i / 8, 0xff, npages / 8);
+ else
+ for (; npages; ++i, --npages)
+ set_bit_le(i, map);
+}
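+
+/*
+ * Note: both helpers take the memset() fast path for whole bytes only,
+ * which assumes (an assumption about the callers) that i is a multiple of
+ * 8 and npages is a multiple of 8 whenever npages >= 8; a partial trailing
+ * byte would otherwise be dropped.
+ */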
+
+static inline u64 sanitize_msr(u64 msr)
+{
+ msr &= ~MSR_HV;
+ msr |= MSR_ME;
+ return msr;
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
+ vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+ vcpu->arch.regs.link = vcpu->arch.lr_tm;
+ vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
+ vcpu->arch.amr = vcpu->arch.amr_tm;
+ vcpu->arch.ppr = vcpu->arch.ppr_tm;
+ vcpu->arch.dscr = vcpu->arch.dscr_tm;
+ vcpu->arch.tar = vcpu->arch.tar_tm;
+ memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+ sizeof(vcpu->arch.regs.gpr));
+ vcpu->arch.fp = vcpu->arch.fp_tm;
+ vcpu->arch.vr = vcpu->arch.vr_tm;
+ vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
+}
+
+static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
+ vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+ vcpu->arch.lr_tm = vcpu->arch.regs.link;
+ vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
+ vcpu->arch.amr_tm = vcpu->arch.amr;
+ vcpu->arch.ppr_tm = vcpu->arch.ppr;
+ vcpu->arch.dscr_tm = vcpu->arch.dscr;
+ vcpu->arch.tar_tm = vcpu->arch.tar;
+ memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+ sizeof(vcpu->arch.regs.gpr));
+ vcpu->arch.fp_tm = vcpu->arch.fp;
+ vcpu->arch.vr_tm = vcpu->arch.vr;
+ vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap);
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+ struct rmap_nested **n_rmap);
+extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long nbytes);
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot,
+ unsigned long gpa, unsigned long hpa,
+ unsigned long nbytes);
+
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+ return pte;
+}
+
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held\n", __func__);
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+ return pte;
+}
+
+static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
+ unsigned long ea, unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held\n", __func__);
+
+ if (mmu_invalidate_retry(kvm, mmu_seq))
+ return NULL;
+
+ pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
+
+ return pte;
+}
+
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+ unsigned long ea, unsigned *hshift);
+
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
new file mode 100644
index 000000000..c8882d9b8
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_ASM_H__
+#define __ASM_KVM_BOOK3S_ASM_H__
+
+/* XICS ICP register offsets */
+#define XICS_XIRR 4
+#define XICS_MFRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
+
+/* Maximum number of threads per physical core */
+#define MAX_SMT_THREADS 8
+
+/* Maximum number of subcores per physical core */
+#define MAX_SUBCORES 4
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+
+#include <asm/kvm_asm.h>
+
+.macro DO_KVM intno
+ .if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
+ (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
+ (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
+ (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
+ (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
+ (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
+ (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+ (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \
+ (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
+ (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
+ (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
+ (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
+ (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
+ (\intno == BOOK3S_INTERRUPT_TRACE) || \
+ (\intno == BOOK3S_INTERRUPT_PERFMON) || \
+ (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
+ (\intno == BOOK3S_INTERRUPT_VSX)
+
+ b kvmppc_trampoline_\intno
+kvmppc_resume_\intno:
+
+ .endif
+.endm
+
+#else
+
+.macro DO_KVM intno
+.endm
+
+#endif /* CONFIG_KVM_BOOK3S_HANDLER */
+
+#else /*__ASSEMBLY__ */
+
+struct kvmppc_vcore;
+
+/* Struct used for coordinating micro-threading (split-core) mode changes */
+struct kvm_split_mode {
+ unsigned long rpr;
+ unsigned long pmmar;
+ unsigned long ldbar;
+ u8 subcore_size;
+ u8 do_nap;
+ u8 napped[MAX_SMT_THREADS];
+ struct kvmppc_vcore *vc[MAX_SUBCORES];
+};
+
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+ ulong host_r1;
+ ulong host_r2;
+ ulong host_msr;
+ ulong vmhandler;
+ ulong scratch0;
+ ulong scratch1;
+ ulong scratch2;
+ u8 in_guest;
+ u8 restore_hid5;
+ u8 napping;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ u8 hwthread_req;
+ u8 hwthread_state;
+ u8 host_ipi;
+ u8 ptid; /* thread number within subcore when split */
+ u8 fake_suspend;
+ struct kvm_vcpu *kvm_vcpu;
+ struct kvmppc_vcore *kvm_vcore;
+ void __iomem *xics_phys;
+ void __iomem *xive_tima_phys;
+ void __iomem *xive_tima_virt;
+ u32 saved_xirr;
+ u64 dabr;
+ u64 host_mmcr[10]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER, MMCR3, SIER2/3 */
+ u32 host_pmc[8];
+ u64 host_purr;
+ u64 host_spurr;
+ u64 host_dscr;
+ u64 dec_expires;
+ struct kvm_split_mode *kvm_split_mode;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u64 cfar;
+ u64 ppr;
+ u64 host_fscr;
+#endif
+};
+
+struct kvmppc_book3s_shadow_vcpu {
+ bool in_use;
+ ulong gpr[14];
+ u32 cr;
+ ulong xer;
+ ulong ctr;
+ ulong lr;
+ ulong pc;
+
+ ulong shadow_srr1;
+ ulong fault_dar;
+ u32 fault_dsisr;
+ u32 last_inst;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ u32 sr[16]; /* Guest SRs */
+
+ struct kvmppc_host_state hstate;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 slb_max; /* highest used guest slb entry */
+ struct {
+ u64 esid;
+ u64 vsid;
+ } slb[64]; /* guest SLB */
+ u64 shadow_fscr;
+#endif
+};
+
+#endif /*__ASSEMBLY__ */
+
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL 0
+#define KVM_HWTHREAD_IN_IDLE 1
+#define KVM_HWTHREAD_IN_KVM 2
+
+#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
new file mode 100644
index 000000000..0a6319448
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
+#define __ASM_KVM_BOOK3S_UVMEM_H__
+
+#ifdef CONFIG_PPC_UV
+int kvmppc_uvmem_init(void);
+void kvmppc_uvmem_free(void);
+bool kvmppc_uvmem_available(void);
+int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
+void kvmppc_uvmem_slot_free(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);
+unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
+ unsigned long gra,
+ unsigned long flags,
+ unsigned long page_shift);
+unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
+ unsigned long gra,
+ unsigned long flags,
+ unsigned long page_shift);
+unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+ struct kvm *kvm, bool skip_page_out);
+int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+ const struct kvm_memory_slot *new);
+void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+ const struct kvm_memory_slot *old);
+#else
+static inline int kvmppc_uvmem_init(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_uvmem_free(void) { }
+
+static inline bool kvmppc_uvmem_available(void)
+{
+ return false;
+}
+
+static inline int
+kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
+{
+ return 0;
+}
+
+static inline void
+kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }
+
+static inline unsigned long
+kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
+ unsigned long flags, unsigned long page_shift)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long
+kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
+ unsigned long flags, unsigned long page_shift)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+ return -EFAULT;
+}
+
+static inline void
+kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+ struct kvm *kvm, bool skip_page_out) { }
+
+static inline int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+ const struct kvm_memory_slot *new)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+ const struct kvm_memory_slot *old) { }
+
+#endif /* CONFIG_PPC_UV */
+#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
new file mode 100644
index 000000000..0c3401b2e
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOKE_H__
+#define __ASM_KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+
+/*
+ * Number of available LPIDs. Only the low-order 6 bits of the LPID register are
+ * implemented on e500mc+ cores.
+ */
+#define KVMPPC_NR_LPIDS 64
+
+#define KVMPPC_INST_EHPRIV 0x7c00021c
+#define EHPRIV_OC_SHIFT 11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG 1
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.regs.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.regs.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.regs.ccr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.ccr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.xer = val;
+}
+
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.xer;
+}
+
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+ /* XXX Would need to check TLB entry */
+ return false;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.link = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.link;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.regs.nip = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.regs.nip;
+}
+
+#ifdef CONFIG_BOOKE
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dear;
+}
+#endif
+
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+ /* Magic page is only supported on e500v2 */
+#ifdef CONFIG_KVM_E500V2
+ return true;
+#else
+ return false;
+#endif
+}
+#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 000000000..7487ef582
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#include <asm/feature-fixups.h>
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * 32-bit host
+ * Expected inputs (normal exceptions):
+ * SCRATCH0 = saved r10
+ * r10 = thread struct
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * r13 = saved CR
+ * *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ * *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ * appropriate SCRATCH = saved r8
+ * r8 = exception level stack frame
+ * r9 = *(r8 + _CCR) = saved CR
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * *(r8 + GPR9) = saved r9
+ * *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ * *(r8 + GPR11) = saved r11
+ *
+ * 64-bit host
+ * Expected inputs (GEN/GDBELL/DBG/CRIT/MC exception types):
+ * r10 = saved CR
+ * r13 = PACA_POINTER
+ * *(r13 + PACA_EX##type + EX_R10) = saved r10
+ * *(r13 + PACA_EX##type + EX_R11) = saved r11
+ * SPRN_SPRG_##type##_SCRATCH = saved r13
+ *
+ * Expected inputs (TLB exception type):
+ * r10 = saved CR
+ * r12 = extlb pointer
+ * r13 = PACA_POINTER
+ * *(r12 + EX_TLB_R10) = saved r10
+ * *(r12 + EX_TLB_R11) = saved r11
+ * *(r12 + EX_TLB_R13) = saved r13
+ * SPRN_SPRG_GEN_SCRATCH = saved r12
+ *
+ * Only the bolted version of TLB miss exception handlers is supported now.
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
+ bf 3, 1975f
+ b kvmppc_handler_\intno\()_\srr1
+1975:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
new file mode 100644
index 000000000..25df316b7
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright Novell Inc. 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_FPU_H__
+#define __ASM_KVM_FPU_H__
+
+#include <linux/types.h>
+
+extern void fps_fres(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_frsqrte(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_fsqrts(u64 *fpscr, u32 *dst, u32 *src1);
+
+extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+
+extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fsel(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+
+#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1);
+#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2);
+#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2, u64 *src3);
+
+extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
+
+extern void kvm_cvt_fd(u32 *from, u64 *to);
+extern void kvm_cvt_df(u64 *from, u32 *to);
+
+#endif
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
new file mode 100644
index 000000000..68e499abd
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_KVM_GUEST_H_
+#define _ASM_POWERPC_KVM_GUEST_H_
+
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_FALSE(kvm_guest);
+
+static inline bool is_kvm_guest(void)
+{
+ return static_branch_unlikely(&kvm_guest);
+}
+
+int __init check_kvm_guest(void);
+#else
+static inline bool is_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
+#endif
+
+#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
new file mode 100644
index 000000000..caea15dcb
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -0,0 +1,889 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <asm/kvm_asm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/hvcall.h>
+#include <asm/mce.h>
+
+#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+
+#define KVM_MAX_VCPUS NR_CPUS
+#define KVM_MAX_VCORES NR_CPUS
+
+#include <asm/cputhreads.h>
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
+#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES)
+
+/*
+ * Limit the nested partition table to 4096 entries (because that's what
+ * hardware supports). Both guest and host use this value.
+ */
+#define KVM_MAX_NESTED_GUESTS_SHIFT 12
+
+#else
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
+#define KVM_HALT_POLL_NS_DEFAULT 10000 /* 10 us */
+
+/* These values are internal and can be increased later */
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 256
+
+/* PPC-specific vcpu->requests bit members */
+#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
+#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
+
+#include <linux/mmu_notifier.h>
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+#define HPTEG_CACHE_NUM (1 << 15)
+#define HPTEG_HASH_BITS_PTE 13
+#define HPTEG_HASH_BITS_PTE_LONG 12
+#define HPTEG_HASH_BITS_VPTE 13
+#define HPTEG_HASH_BITS_VPTE_LONG 5
+#define HPTEG_HASH_BITS_VPTE_64K 11
+#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
+#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
+#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)
+
+/* Physical Address Mask - allowed range of real mode RAM access */
+#define KVM_PAM 0x0fffffffffffffffULL
+
+struct lppaca;
+struct slb_shadow;
+struct dtl_entry;
+
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+struct kvm_nested_guest;
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+ u64 num_2M_pages;
+ u64 num_1G_pages;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ u64 sum_exits;
+ u64 mmio_exits;
+ u64 signal_exits;
+ u64 light_exits;
+ /* Account for special types of light exits: */
+ u64 itlb_real_miss_exits;
+ u64 itlb_virt_miss_exits;
+ u64 dtlb_real_miss_exits;
+ u64 dtlb_virt_miss_exits;
+ u64 syscall_exits;
+ u64 isi_exits;
+ u64 dsi_exits;
+ u64 emulated_inst_exits;
+ u64 dec_exits;
+ u64 ext_intr_exits;
+ u64 halt_successful_wait;
+ u64 dbell_exits;
+ u64 gdbell_exits;
+ u64 ld;
+ u64 st;
+#ifdef CONFIG_PPC_BOOK3S
+ u64 pf_storage;
+ u64 pf_instruc;
+ u64 sp_storage;
+ u64 sp_instruc;
+ u64 queue_intr;
+ u64 ld_slow;
+ u64 st_slow;
+#endif
+ u64 pthru_all;
+ u64 pthru_host;
+ u64 pthru_bad_aff;
+};
+
+enum kvm_exit_types {
+ MMIO_EXITS,
+ SIGNAL_EXITS,
+ ITLB_REAL_MISS_EXITS,
+ ITLB_VIRT_MISS_EXITS,
+ DTLB_REAL_MISS_EXITS,
+ DTLB_VIRT_MISS_EXITS,
+ SYSCALL_EXITS,
+ ISI_EXITS,
+ DSI_EXITS,
+ EMULATED_INST_EXITS,
+ EMULATED_MTMSRWE_EXITS,
+ EMULATED_WRTEE_EXITS,
+ EMULATED_MTSPR_EXITS,
+ EMULATED_MFSPR_EXITS,
+ EMULATED_MTMSR_EXITS,
+ EMULATED_MFMSR_EXITS,
+ EMULATED_TLBSX_EXITS,
+ EMULATED_TLBWE_EXITS,
+ EMULATED_RFI_EXITS,
+ EMULATED_RFCI_EXITS,
+ EMULATED_RFDI_EXITS,
+ DEC_EXITS,
+ EXT_INTR_EXITS,
+ HALT_WAKEUP,
+ USR_PR_INST,
+ FP_UNAVAIL,
+ DEBUG_EXITS,
+ TIMEINGUEST,
+ DBELL_EXITS,
+ GDBELL_EXITS,
+ __NUMBER_OF_KVM_EXIT_TYPES
+};
+
+/* allow access to big endian 32bit upper/lower parts and 64bit var */
+struct kvmppc_exit_timing {
+ union {
+ u64 tv64;
+ struct {
+ u32 tbu, tbl;
+ } tv32;
+ };
+};
+
+struct kvmppc_pginfo {
+ unsigned long pfn;
+ atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_iommu_table {
+ struct rcu_head rcu;
+ struct list_head next;
+ struct iommu_table *tbl;
+ struct kref kref;
+};
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+struct kvmppc_spapr_tce_table {
+ struct list_head list;
+ struct kvm *kvm;
+ u64 liobn;
+ struct rcu_head rcu;
+ u32 page_shift;
+ u64 offset; /* in pages */
+ u64 size; /* window size in pages */
+ struct list_head iommu_tables;
+ struct mutex alloc_lock;
+ struct page *pages[];
+};
+
+/* XICS components, defined in book3s_xics.c */
+struct kvmppc_xics;
+struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
+extern struct kvm_device_ops kvm_xive_native_ops;
+
+struct kvmppc_passthru_irqmap;
+
+/*
+ * The reverse mapping array has one entry for each HPTE,
+ * which stores the guest's view of the second word of the HPTE
+ * (including the guest physical address of the mapping),
+ * plus forward and backward pointers in a doubly-linked ring
+ * of HPTEs that map the same host page. The pointers in this
+ * ring are 32-bit HPTE indexes, to save space.
+ */
+struct revmap_entry {
+ unsigned long guest_rpte;
+ unsigned int forw, back;
+};
+
+/*
+ * The rmap array of size number of guest pages is allocated for each memslot.
+ * This array is used to store usage specific information about the guest page.
+ * Below are the encodings of the various possible usage types.
+ */
+/* Free bits which can be used to define a new usage */
+#define KVMPPC_RMAP_TYPE_MASK 0xff00000000000000
+#define KVMPPC_RMAP_NESTED 0xc000000000000000 /* Nested rmap array */
+#define KVMPPC_RMAP_HPT 0x0100000000000000 /* HPT guest */
+
+/*
+ * rmap usage definition for a hash page table (hpt) guest:
+ * 0x0000080000000000 Lock bit
+ * 0x0000018000000000 RC bits
+ * 0x0000000100000000 Present bit
+ * 0x00000000ffffffff HPT index bits
+ * The bottom 32 bits are the index in the guest HPT of a HPTE that points to
+ * the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT 43
+#define KVMPPC_RMAP_RC_SHIFT 32
+#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_PRESENT 0x100000000ul
+#define KVMPPC_RMAP_INDEX 0xfffffffful
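+
+/*
+ * Worked example (illustrative): an rmap word of
+ * KVMPPC_RMAP_HPT | KVMPPC_RMAP_PRESENT | 0x12 describes a present entry
+ * whose HPTE lives at guest HPT index 0x12; the lock and RC bits above
+ * share the same word.
+ */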
+
+struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned long *rmap;
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+};
+
+struct kvm_hpt_info {
+ /* Host virtual (linear mapping) address of guest HPT */
+ unsigned long virt;
+ /* Array of reverse mapping entries for each guest HPTE */
+ struct revmap_entry *rev;
+ /* Guest HPT size is 2**(order) bytes */
+ u32 order;
+ /* 1 if HPT allocated with CMA, 0 otherwise */
+ int cma;
+};
+
+struct kvm_resize_hpt;
+
+/* Flag values for kvm_arch.secure_guest */
+#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
+#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */
+#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
+
+struct kvm_arch {
+ unsigned int lpid;
+ unsigned int smt_mode; /* # vcpus per virtual core */
+	unsigned int emul_smt_mode;	/* emulated SMT mode, on P9 */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned int tlb_sets;
+ struct kvm_hpt_info hpt;
+ atomic64_t mmio_update;
+ unsigned int host_lpid;
+ unsigned long host_lpcr;
+ unsigned long sdr1;
+ unsigned long host_sdr1;
+ unsigned long lpcr;
+ unsigned long vrma_slb_v;
+ int mmu_ready;
+ atomic_t vcpus_running;
+ u32 online_vcores;
+ atomic_t hpte_mod_interest;
+ cpumask_t need_tlb_flush;
+ u8 radix;
+ u8 fwnmi_enabled;
+ u8 secure_guest;
+ u8 svm_enabled;
+ bool nested_enable;
+ bool dawr1_enabled;
+ pgd_t *pgtable;
+ u64 process_table;
+ struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ struct mutex hpt_mutex;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
+ struct mutex rtas_token_lock;
+ DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+#endif
+#ifdef CONFIG_KVM_MPIC
+ struct openpic *mpic;
+#endif
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_xics *xics;
+ struct kvmppc_xics *xics_device;
+ struct kvmppc_xive *xive; /* Current XIVE device in use */
+ struct {
+ struct kvmppc_xive *native;
+ struct kvmppc_xive *xics_on_xive;
+ } xive_devices;
+ struct kvmppc_passthru_irqmap *pimap;
+#endif
+ struct kvmppc_ops *kvm_ops;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ struct mutex uvmem_lock;
+ struct list_head uvmem_pfns;
+ struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
+ u64 l1_ptcr;
+ struct idr kvm_nested_guest_idr;
+ /* This array can grow quite large, keep it at the end */
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif
+};
+
+#define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)
+#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
+#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
+
+/* This bit is used when a vcore exit is triggered from outside the vcore */
+#define VCORE_EXIT_REQ 0x10000
+
+/*
+ * Values for vcore_state.
+ * Note that these are arranged such that lower values
+ * (< VCORE_SLEEPING) don't require stolen time accounting
+ * on load/unload, and higher values do.
+ */
+#define VCORE_INACTIVE 0
+#define VCORE_PREEMPT 1
+#define VCORE_PIGGYBACK 2
+#define VCORE_SLEEPING 3
+#define VCORE_RUNNING 4
+#define VCORE_EXITING 5
+#define VCORE_POLLING 6
+
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest. There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+ unsigned long gpa; /* Current guest phys addr */
+ void *pinned_addr; /* Address in kernel linear mapping */
+ void *pinned_end; /* End of region */
+ unsigned long next_gpa; /* Guest phys addr for update */
+ unsigned long len; /* Number of bytes required */
+ u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+ bool dirty; /* true => area has been modified by kernel */
+};
+
+struct kvmppc_pte {
+ ulong eaddr;
+ u64 vpage;
+ ulong raddr;
+ bool may_read : 1;
+ bool may_write : 1;
+ bool may_execute : 1;
+ unsigned long wimg;
+ unsigned long rc;
+ u8 page_size; /* MMU_PAGE_xxx */
+ u8 page_shift;
+};
+
+struct kvmppc_mmu {
+ /* book3s_64 only */
+ void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
+ u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
+ void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ void (*slbia)(struct kvm_vcpu *vcpu);
+ /* book3s */
+ void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
+ u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
+ int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data, bool iswrite);
+ void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
+ u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
+ bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
+};
+
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
+ u8 base_page_size; /* MMU_PAGE_xxx */
+};
+
+/* Struct used to accumulate timing information in HV real mode code */
+struct kvmhv_tb_accumulator {
+ u64 seqcount; /* used to synchronize access, also count * 2 */
+ u64 tb_total; /* total time in timebase ticks */
+ u64 tb_min; /* min time */
+ u64 tb_max; /* max time */
+};
+
+#ifdef CONFIG_PPC_BOOK3S_64
+struct kvmppc_irq_map {
+ u32 r_hwirq;
+ u32 v_hwirq;
+ struct irq_desc *desc;
+};
+
+#define KVMPPC_PIRQ_MAPPED 1024
+struct kvmppc_passthru_irqmap {
+ int n_mapped;
+ struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
+};
+#endif
+
+# ifdef CONFIG_PPC_E500
+#define KVMPPC_BOOKE_IAC_NUM 2
+#define KVMPPC_BOOKE_DAC_NUM 2
+# else
+#define KVMPPC_BOOKE_IAC_NUM 4
+#define KVMPPC_BOOKE_DAC_NUM 2
+# endif
+#define KVMPPC_BOOKE_MAX_IAC 4
+#define KVMPPC_BOOKE_MAX_DAC 2
+
+/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
+#define KVMPPC_EPR_NONE 0 /* EPR not supported */
+#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
+#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */
+
+#define KVMPPC_IRQ_DEFAULT 0
+#define KVMPPC_IRQ_MPIC 1
+#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
+#define KVMPPC_IRQ_XIVE 3 /* XIVE native exploitation mode */
+
+#define MMIO_HPTE_CACHE_SIZE 4
+
+struct mmio_hpte_cache_entry {
+ unsigned long hpte_v;
+ unsigned long hpte_r;
+ unsigned long rpte;
+ unsigned long pte_index;
+ unsigned long eaddr;
+ unsigned long slb_v;
+ long mmio_update;
+ unsigned int slb_base_pshift;
+};
+
+struct mmio_hpte_cache {
+ struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
+ unsigned int index;
+};
+
+#define KVMPPC_VSX_COPY_NONE 0
+#define KVMPPC_VSX_COPY_WORD 1
+#define KVMPPC_VSX_COPY_DWORD 2
+#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
+#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4
+
+#define KVMPPC_VMX_COPY_BYTE 8
+#define KVMPPC_VMX_COPY_HWORD 9
+#define KVMPPC_VMX_COPY_WORD 10
+#define KVMPPC_VMX_COPY_DWORD 11
+
+struct openpic;
+
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+ struct {
+ u8 nsr;
+ u8 cppr;
+ u8 ipb;
+ u8 lsmfb;
+ u8 ack;
+ u8 inc;
+ u8 age;
+ u8 pipr;
+ };
+ __be64 w01;
+};
+
+struct kvm_vcpu_arch {
+ ulong host_stack;
+ u32 host_pid;
+#ifdef CONFIG_PPC_BOOK3S
+ struct kvmppc_slb slb[64];
+ int slb_max; /* 1 + index of last valid entry in slb[] */
+ int slb_nr; /* total number of entries in SLB */
+ struct kvmppc_mmu mmu;
+ struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+ struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
+#endif
+
+ /*
+ * This is passed along to the HV via H_ENTER_NESTED. Align to
+ * prevent it crossing a real 4K page.
+ */
+ struct pt_regs regs __aligned(512);
+
+ struct thread_fp_state fp;
+
+#ifdef CONFIG_SPE
+ ulong evr[32];
+ ulong spefscr;
+ ulong host_spefscr;
+ u64 acc;
+#endif
+#ifdef CONFIG_ALTIVEC
+ struct thread_vr_state vr;
+#endif
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ u32 host_mas4;
+ u32 host_mas6;
+ u32 shadow_epcr;
+ u32 shadow_msrp;
+ u32 eplc;
+ u32 epsc;
+ u32 oldpir;
+#endif
+
+#if defined(CONFIG_BOOKE)
+#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
+ u32 epcr;
+#endif
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+ /* For Gekko paired singles */
+ u32 qpr[32];
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+ ulong tar;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+ ulong hflags;
+ ulong guest_owned_ext;
+ ulong purr;
+ ulong spurr;
+ ulong ic;
+ ulong dscr;
+ ulong amr;
+ ulong uamor;
+ ulong iamr;
+ u32 ctrl;
+ u32 dabrx;
+ ulong dabr;
+ ulong dawr0;
+ ulong dawrx0;
+ ulong dawr1;
+ ulong dawrx1;
+ ulong ciabr;
+ ulong cfar;
+ ulong ppr;
+ u32 pspb;
+ u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u8 load_tm;
+#endif
+ ulong fscr;
+ ulong shadow_fscr;
+ ulong ebbhr;
+ ulong ebbrr;
+ ulong bescr;
+ ulong csigr;
+ ulong tacr;
+ ulong tcscr;
+ ulong acop;
+ ulong wort;
+ ulong tid;
+ ulong psscr;
+ ulong hfscr;
+ ulong shadow_srr1;
+#endif
+ u32 vrsave; /* also USPRG0 */
+ u32 mmucr;
+ /* shadow_msr is unused for BookE HV */
+ ulong shadow_msr;
+ ulong csrr0;
+ ulong csrr1;
+ ulong dsrr0;
+ ulong dsrr1;
+ ulong mcsrr0;
+ ulong mcsrr1;
+ ulong mcsr;
+ ulong dec;
+#ifdef CONFIG_BOOKE
+ u32 decar;
+#endif
+ /* Time base value when we entered the guest */
+ u64 entry_tb;
+ u64 entry_vtb;
+ u64 entry_ic;
+ u32 tcr;
+ ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
+ u32 ivor[64];
+ ulong ivpr;
+ u32 pvr;
+
+ u32 shadow_pid;
+ u32 shadow_pid1;
+ u32 pid;
+ u32 swap_pid;
+
+ u32 ccr0;
+ u32 ccr1;
+ u32 dbsr;
+
+ u64 mmcr[4]; /* MMCR0, MMCR1, MMCR2, MMCR3 */
+ u64 mmcra;
+ u64 mmcrs;
+ u32 pmc[8];
+ u32 spmc[2];
+ u64 siar;
+ u64 sdar;
+ u64 sier[3];
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tfhar;
+ u64 texasr;
+ u64 tfiar;
+ u64 orig_texasr;
+
+ u32 cr_tm;
+ u64 xer_tm;
+ u64 lr_tm;
+ u64 ctr_tm;
+ u64 amr_tm;
+ u64 ppr_tm;
+ u64 dscr_tm;
+ u64 tar_tm;
+
+ ulong gpr_tm[32];
+
+ struct thread_fp_state fp_tm;
+
+ struct thread_vr_state vr_tm;
+ u32 vrsave_tm; /* also USPRG0 */
+#endif
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ struct mutex exit_timing_lock;
+ struct kvmppc_exit_timing timing_exit;
+ struct kvmppc_exit_timing timing_last_enter;
+ u32 last_exit_type;
+ u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_last_exit;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+ ulong fault_dar;
+ u32 fault_dsisr;
+ unsigned long intr_msr;
+ /*
+	 * POWER9 and later: fault_gpa contains the guest real address of the
+	 * page fault for a radix guest, or the segment descriptor (equivalent
+	 * to the result of slbmfev for the SLB entry that translated the EA)
+	 * for hash guests.
+ */
+ ulong fault_gpa;
+#endif
+
+#ifdef CONFIG_BOOKE
+ ulong fault_dear;
+ ulong fault_esr;
+ ulong queued_dear;
+ ulong queued_esr;
+ spinlock_t wdt_lock;
+ struct timer_list wdt_timer;
+ u32 tlbcfg[4];
+ u32 tlbps[4];
+ u32 mmucfg;
+ u32 eptcfg;
+ u32 epr;
+ u64 sprg9;
+ u32 pwrmgtcr0;
+ u32 crit_save;
+	/* guest debug registers */
+ struct debug_reg dbg_reg;
+#endif
+ gpa_t paddr_accessed;
+ gva_t vaddr_accessed;
+ pgd_t *pgdir;
+
+ u16 io_gpr; /* GPR used as IO source/target */
+ u8 mmio_host_swabbed;
+ u8 mmio_sign_extend;
+ /* conversion between single and double precision */
+ u8 mmio_sp64_extend;
+	/*
+	 * Number of host accesses used to emulate one VSX access.
+	 * If we use 2 * 8 bytes to emulate 1 * 16 bytes,
+	 * then the number should be 2 and
+	 * mmio_copy_type = KVMPPC_VSX_COPY_DWORD.
+	 * If we use 4 * 4 bytes to emulate 1 * 16 bytes,
+	 * the number should be 4 and
+	 * mmio_copy_type = KVMPPC_VSX_COPY_WORD.
+	 */
+ u8 mmio_vsx_copy_nums;
+ u8 mmio_vsx_offset;
+ u8 mmio_vmx_copy_nums;
+ u8 mmio_vmx_offset;
+ u8 mmio_copy_type;
+ u8 osi_needed;
+ u8 osi_enabled;
+ u8 papr_enabled;
+ u8 watchdog_enabled;
+ u8 sane;
+ u8 cpu_type;
+ u8 hcall_needed;
+ u8 epr_flags; /* KVMPPC_EPR_xxx */
+ u8 epr_needed;
+ u8 external_oneshot; /* clear external irq after delivery */
+
+ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+ struct hrtimer dec_timer;
+ u64 dec_jiffies;
+ u64 dec_expires; /* Relative to guest timebase. */
+ unsigned long pending_exceptions;
+ u8 ceded;
+ u8 prodded;
+ u8 doorbell_request;
+ u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
+ u32 last_inst;
+
+ struct rcuwait wait;
+ struct rcuwait *waitp;
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+ int state;
+ int ptid;
+ int thread_cpu;
+ int prev_cpu;
+ bool timer_running;
+ wait_queue_head_t cpu_run;
+ struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
+
+ struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ bool shared_big_endian;
+#endif
+ unsigned long magic_page_pa; /* phys addr to map the magic page to */
+ unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+ bool disable_kernel_nx;
+
+ int irq_type; /* one of KVM_IRQ_* */
+ int irq_cpu_id;
+ struct openpic *mpic; /* KVM_IRQ_MPIC */
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_icp *icp; /* XICS presentation controller */
+ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+ __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
+	u8 xive_pushed;	 /* Is the VP pushed on the physical CPU? */
+	u8 xive_esc_on;	 /* Is the escalation irq enabled? */
+ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
+ u64 xive_esc_raddr; /* Escalation interrupt ESB real addr */
+ u64 xive_esc_vaddr; /* Escalation interrupt ESB virt addr */
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ struct kvm_vcpu_arch_shared shregs;
+
+ struct mmio_hpte_cache mmio_cache;
+ unsigned long pgfault_addr;
+ long pgfault_index;
+ unsigned long pgfault_hpte[2];
+ struct mmio_hpte_cache_entry *pgfault_cache;
+
+ struct task_struct *run_task;
+
+ spinlock_t vpa_update_lock;
+ struct kvmppc_vpa vpa;
+ struct kvmppc_vpa dtl;
+ struct dtl_entry *dtl_ptr;
+ unsigned long dtl_index;
+ u64 stolen_logged;
+ struct kvmppc_vpa slb_shadow;
+
+ spinlock_t tbacct_lock;
+ u64 busy_stolen;
+ u64 busy_preempt;
+
+ u32 emul_inst;
+
+ u32 online;
+
+ u64 hfscr_permitted; /* A mask of permitted HFSCR facilities */
+
+ /* For support of nested guests */
+ struct kvm_nested_guest *nested;
+ u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
+ u32 nested_vcpu_id;
+ gpa_t nested_io_gpr;
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
+ u64 cur_tb_start; /* when it started */
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+ struct kvmhv_tb_accumulator vcpu_entry;
+ struct kvmhv_tb_accumulator vcpu_exit;
+ struct kvmhv_tb_accumulator in_guest;
+ struct kvmhv_tb_accumulator hcall;
+ struct kvmhv_tb_accumulator pg_fault;
+ struct kvmhv_tb_accumulator guest_entry;
+ struct kvmhv_tb_accumulator guest_exit;
+#else
+ struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
+ struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
+ struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
+ struct kvmhv_tb_accumulator guest_time; /* guest execution */
+ struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
+#endif
+#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
+};
+
+#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+#define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j])
+#define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i])
+
+/* Values for vcpu->arch.state */
+#define KVMPPC_VCPU_NOTREADY 0
+#define KVMPPC_VCPU_RUNNABLE 1
+#define KVMPPC_VCPU_BUSY_IN_HOST 2
+
+/* Values for vcpu->arch.io_gpr */
+#define KVM_MMIO_REG_MASK 0x003f
+#define KVM_MMIO_REG_EXT_MASK 0xffc0
+#define KVM_MMIO_REG_GPR 0x0000
+#define KVM_MMIO_REG_FPR 0x0040
+#define KVM_MMIO_REG_QPR 0x0080
+#define KVM_MMIO_REG_FQPR 0x00c0
+#define KVM_MMIO_REG_VSX 0x0100
+#define KVM_MMIO_REG_VMX 0x0180
+#define KVM_MMIO_REG_NESTED_GPR 0xffc0
+
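+/*
+ * For example, an io_gpr value of (KVM_MMIO_REG_FPR | 3) steers a completed
+ * MMIO load into FPR3: the KVM_MMIO_REG_MASK bits select the register index
+ * and the KVM_MMIO_REG_EXT_MASK bits select the register class.
+ */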
+
+#define __KVM_HAVE_ARCH_WQP
+#define __KVM_HAVE_CREATE_DEVICE
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
new file mode 100644
index 000000000..abe1b5e82
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#include <asm/kvm_guest.h>
+
+#include <uapi/asm/kvm_para.h>
+
+static inline int kvm_para_available(void)
+{
+ return IS_ENABLED(CONFIG_KVM_GUEST) && is_kvm_guest();
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ unsigned long r;
+
+ if (!kvm_para_available())
+ return 0;
+
+	if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
+ return 0;
+
+ return r;
+}
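+
+/*
+ * Callers normally test individual feature bits through the generic
+ * kvm_para_has_feature(), e.g. kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE).
+ */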
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
new file mode 100644
index 000000000..bfacf1278
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -0,0 +1,1040 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+#include <linux/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/kvm_book3s.h>
+#else
+#include <asm/kvm_booke.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#include <asm/xive.h>
+#include <asm/cpu_has_feature.h>
+#endif
+
+/*
+ * KVMPPC_INST_SW_BREAKPOINT is the debug instruction
+ * used to support software breakpoints.
+ */
+#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_AGAIN, /* something went wrong. go again */
+ EMULATE_EXIT_USER, /* emulation requires exit to user-space */
+};
+
+enum instruction_fetch_type {
+ INST_GENERIC,
+ INST_SC, /* system call */
+};
+
+enum xlate_instdata {
+ XLATE_INST, /* translate instruction address */
+ XLATE_DATA /* translate data address */
+};
+
+enum xlate_readwrite {
+ XLATE_READ, /* check for read permissions */
+ XLATE_WRITE /* check for write permissions */
+};
+
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern void kvmppc_handler_highmem(void);
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian);
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian);
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
+ unsigned int rs, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
+ u64 val, unsigned int bytes,
+ int is_default_endian);
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
+ int rs, unsigned int bytes,
+ int is_default_endian);
+
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+ enum instruction_fetch_type type, u32 *inst);
+
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
+extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
+extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
+extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
+extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+/* Core-specific hooks */
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+ unsigned int gtlb_idx);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+ gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
+ enum xlate_instdata xlid, enum xlate_readwrite xlrw,
+ struct kvmppc_pte *pte);
+
+extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_processor_compat(void);
+extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr);
+
+extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+ ulong esr_flags);
+extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+ ulong dear_flags,
+ ulong esr_flags);
+extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+ ulong esr_flags);
+extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_booke_init(void);
+extern void kvmppc_booke_exit(void);
+
+extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
+extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
+extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
+extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
+extern void kvmppc_rmap_reset(struct kvm *kvm);
+extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot, unsigned long porder);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ struct iommu_group *grp);
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ struct iommu_group *grp);
+extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
+extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
+
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce_64 *args);
+#define kvmppc_ioba_validate(stt, ioba, npages) \
+ (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+ (stt)->size, (ioba), (npages)) ? \
+ H_PARAMETER : H_SUCCESS)
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages);
+extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba);
+extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
+extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
+
+extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
+
+extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
+ struct kvm_ppc_resize_hpt *rhpt);
+extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
+ struct kvm_ppc_resize_hpt *rhpt);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
+extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
+extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
+extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+
+void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
+void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
+
+union kvmppc_one_reg {
+ u32 wval;
+ u64 dval;
+ vector128 vval;
+ u64 vsxval[2];
+ u32 vsx32val[4];
+ u16 vsx16val[8];
+ u8 vsx8val[16];
+ struct {
+ u64 addr;
+ u64 length;
+ } vpaval;
+ u64 xive_timaval[2];
+};
+
+struct kvmppc_ops {
+ struct module *owner;
+ int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+ void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
+ int (*vcpu_create)(struct kvm_vcpu *vcpu);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ int (*check_requests)(struct kvm_vcpu *vcpu);
+ int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+ void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+ int (*prepare_memory_region)(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+ void (*commit_memory_region)(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+ bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ void (*free_memslot)(struct kvm_memory_slot *slot);
+ int (*init_vm)(struct kvm *kvm);
+ void (*destroy_vm)(struct kvm *kvm);
+ int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+ int (*emulate_op)(struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+ int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+ int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+ void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+ long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
+ int (*hcall_implemented)(unsigned long hcall);
+ int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
+ int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+ int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
+ unsigned long flags);
+ void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+ int (*enable_nested)(struct kvm *kvm);
+ int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size);
+ int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size);
+ int (*enable_svm)(struct kvm *kvm);
+ int (*svm_off)(struct kvm *kvm);
+ int (*enable_dawr1)(struct kvm *kvm);
+ bool (*hash_v3_possible)(void);
+ int (*create_vm_debugfs)(struct kvm *kvm);
+ int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
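+
+/*
+ * Each VM is bound to one of these ops structures when it is created;
+ * calls such as vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu) then dispatch to
+ * the HV or PR implementation.
+ */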
+
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+ enum instruction_fetch_type type, u32 *inst)
+{
+ int ret = EMULATE_DONE;
+ u32 fetched_inst;
+
+	/* Load the instruction manually if the exit path failed to do so */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+ /* Write fetch_failed unswapped if the fetch failed */
+ if (ret == EMULATE_DONE)
+ fetched_inst = kvmppc_need_byteswap(vcpu) ?
+ swab32(vcpu->arch.last_inst) :
+ vcpu->arch.last_inst;
+ else
+ fetched_inst = vcpu->arch.last_inst;
+
+ *inst = fetched_inst;
+ return ret;
+}
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+ return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
+extern int kvmppc_hwrng_present(void);
+
+/*
+ * Extracts inst bits msb through lsb using the spec's (IBM) bit ordering,
+ * i.e. bit 0 is the leftmost (most significant) bit. Both boundary bits
+ * are included.
+ */
+static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
+{
+ u32 r;
+ u32 mask;
+
+ BUG_ON(msb > lsb);
+
+ mask = (1 << (lsb - msb + 1)) - 1;
+ r = (inst >> (63 - lsb)) & mask;
+
+ return r;
+}
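+
+/*
+ * For example, with a 32-bit instruction right-justified in 'inst', the RT
+ * field (instruction bits 6-10) is kvmppc_get_field(inst, 6 + 32, 10 + 32):
+ * mask = 0x1f and r = (inst >> 21) & 0x1f.
+ */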
+
+/*
+ * Replaces inst bits msb through lsb with value, using the same (IBM) bit
+ * ordering.
+ */
+static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
+{
+ u32 r;
+ u32 mask;
+
+ BUG_ON(msb > lsb);
+
+ mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
+ r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
+
+ return r;
+}
+
+#define one_reg_size(id) \
+ (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+#define get_reg_val(id, reg) ({ \
+ union kvmppc_one_reg __u; \
+ switch (one_reg_size(id)) { \
+ case 4: __u.wval = (reg); break; \
+ case 8: __u.dval = (reg); break; \
+ default: BUG(); \
+ } \
+ __u; \
+})
+
+#define set_reg_val(id, val) ({ \
+ u64 __v; \
+ switch (one_reg_size(id)) { \
+ case 4: __v = (val).wval; break; \
+ case 8: __v = (val).dval; break; \
+ default: BUG(); \
+ } \
+ __v; \
+})
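+
+/*
+ * Typical usage in the one_reg get/set paths, e.g.:
+ *	*val = get_reg_val(id, vcpu->arch.fscr);
+ *	vcpu->arch.fscr = set_reg_val(id, *val);
+ */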
+
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+struct openpic;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+extern void kvm_cma_reserve(void) __init;
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+ paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
+}
+
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{
+ paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+ paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
+}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ u32 xirr;
+
+ xirr = get_paca()->kvm_hstate.saved_xirr;
+ get_paca()->kvm_hstate.saved_xirr = 0;
+ return xirr;
+}
+
+/*
+ * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
+ * a CPU thread that's running/napping inside a guest is by default regarded
+ * as a request to wake the CPU (if needed) and continue execution within the
+ * guest, potentially to process new state like externally-generated
+ * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
+ *
+ * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
+ * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
+ * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
+ * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
+ * the receiving side prior to processing the IPI work.
+ *
+ * NOTE:
+ *
+ * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
+ * This is to guard against sequences such as the following:
+ *
+ * CPU
+ * X: smp_muxed_ipi_set_message():
+ * X: smp_mb()
+ * X: message[RESCHEDULE] = 1
+ * X: doorbell_global_ipi(42):
+ * X: kvmppc_set_host_ipi(42)
+ * X: ppc_msgsnd_sync()/smp_mb()
+ * X: ppc_msgsnd() -> 42
+ * 42: doorbell_exception(): // from CPU X
+ * 42: ppc_msgsync()
+ * 105: smp_muxed_ipi_set_message():
+ * 105: smp_mb()
+ * // STORE DEFERRED DUE TO RE-ORDERING
+ * --105: message[CALL_FUNCTION] = 1
+ * | 105: doorbell_global_ipi(42):
+ * | 105: kvmppc_set_host_ipi(42)
+ * | 42: kvmppc_clear_host_ipi(42)
+ * | 42: smp_ipi_demux_relaxed()
+ * | 42: // returns to executing guest
+ * | // RE-ORDERED STORE COMPLETES
+ * ->105: message[CALL_FUNCTION] = 1
+ * 105: ppc_msgsnd_sync()/smp_mb()
+ * 105: ppc_msgsnd() -> 42
+ * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ * 105: // hangs waiting on 42 to process messages/call_single_queue
+ *
+ * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
+ * to guard against sequences such as the following (as well as to create
+ * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
+ *
+ * CPU
+ * X: smp_muxed_ipi_set_message():
+ * X: smp_mb()
+ * X: message[RESCHEDULE] = 1
+ * X: doorbell_global_ipi(42):
+ * X: kvmppc_set_host_ipi(42)
+ * X: ppc_msgsnd_sync()/smp_mb()
+ * X: ppc_msgsnd() -> 42
+ * 42: doorbell_exception(): // from CPU X
+ * 42: ppc_msgsync()
+ * // STORE DEFERRED DUE TO RE-ORDERING
+ * -- 42: kvmppc_clear_host_ipi(42)
+ * | 42: smp_ipi_demux_relaxed()
+ * | 105: smp_muxed_ipi_set_message():
+ * | 105: smp_mb()
+ * | 105: message[CALL_FUNCTION] = 1
+ * | 105: doorbell_global_ipi(42):
+ * | 105: kvmppc_set_host_ipi(42)
+ * | // RE-ORDERED STORE COMPLETES
+ * -> 42: kvmppc_clear_host_ipi(42)
+ * 42: // returns to executing guest
+ * 105: ppc_msgsnd_sync()/smp_mb()
+ * 105: ppc_msgsnd() -> 42
+ * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ * 105: // hangs waiting on 42 to process messages/call_single_queue
+ */
+static inline void kvmppc_set_host_ipi(int cpu)
+{
+ /*
+ * order stores of IPI messages vs. setting of host_ipi flag
+ *
+ * pairs with the barrier in kvmppc_clear_host_ipi()
+ */
+ smp_mb();
+ paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
+}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{
+ paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
+ /*
+ * order clearing of host_ipi flag vs. processing of IPI messages
+ *
+ * pairs with the barrier in kvmppc_set_host_ipi()
+ */
+ smp_mb();
+}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
+
+extern void kvm_hv_vm_activated(void);
+extern void kvm_hv_vm_deactivated(void);
+extern bool kvm_hv_mode_active(void);
+
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);
+
+#else
+static inline void __init kvm_cma_reserve(void)
+{}
+
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_kick(vcpu);
+}
+
+static inline bool kvm_hv_mode_active(void) { return false; }
+
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+ return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_KVM_XICS
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
+}
+
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+ struct kvm *kvm)
+{
+ if (kvm && kvm_irq_bypass)
+ return kvm->arch.pimap;
+ return NULL;
+}
+
+extern void kvmppc_alloc_host_rm_ops(void);
+extern void kvmppc_free_host_rm_ops(void);
+extern void kvmppc_free_pimap(struct kvm *kvm);
+extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
+extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xics_ipi_action(void);
+extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
+ struct kvmppc_irq_map *irq_map,
+ struct kvmppc_passthru_irqmap *pimap,
+ bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+
+extern int h_ipi_redirect;
+#else
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+ struct kvm *kvm)
+ { return NULL; }
+static inline void kvmppc_alloc_host_rm_ops(void) {}
+static inline void kvmppc_free_host_rm_ops(void) {}
+static inline void kvmppc_free_pimap(struct kvm *kvm) {}
+static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+ { return 0; }
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+ { return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+ { return 0; }
+#endif
+
+#ifdef CONFIG_KVM_XIVE
+/*
+ * In the names below, the first "xive" is the "eXternal Interrupt
+ * Virtualization Engine", i.e. the new P9 interrupt controller, while the
+ * second "xive" is the legacy "eXternal Interrupt Vector Entry", which is
+ * the configuration of an interrupt on the "xics" interrupt controller of
+ * P8 and earlier. These two functions consume or produce a legacy "XIVE"
+ * state for the new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
+}
+
+extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+extern bool kvmppc_xive_native_supported(void);
+
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 unsigned long host_irq) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 unsigned long host_irq) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status) { return -ENODEV; }
+static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return 0; }
+static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return -ENOENT; }
+
+#endif /* CONFIG_KVM_XIVE */
+
+#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
+static inline bool xics_on_xive(void)
+{
+ return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool xics_on_xive(void)
+{
+ return false;
+}
+#endif
+
+/*
+ * Prototypes for functions called only from assembler code.
+ * Having prototypes reduces sparse errors.
+ */
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages);
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages);
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+ unsigned int yield_count);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
+void kvmhv_commence_exit(int trap);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_subcore_enter_guest(void);
+void kvmppc_subcore_exit_guest(void);
+long kvmppc_realmode_hmi_handler(void);
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src);
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long slb_v, unsigned int status, bool data);
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
+
+/*
+ * Host-side operations we want to set up while running in real
+ * mode in the guest, operating on the XICS.
+ * Currently only VCPU wakeup is supported.
+ */
+
+union kvmppc_rm_state {
+ unsigned long raw;
+ struct {
+ u32 in_host;
+ u32 rm_action;
+ };
+};
+
+struct kvmppc_host_rm_core {
+ union kvmppc_rm_state rm_state;
+ void *rm_data;
+ char pad[112];
+};
+
+struct kvmppc_host_rm_ops {
+ struct kvmppc_host_rm_core *rm_core;
+ void (*vcpu_kick)(struct kvm_vcpu *vcpu);
+};
+
+extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+
+static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GEPR);
+#elif defined(CONFIG_BOOKE)
+ return vcpu->arch.epr;
+#else
+ return 0;
+#endif
+}
+
+static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GEPR, epr);
+#elif defined(CONFIG_BOOKE)
+ vcpu->arch.epr = epr;
+#endif
+}
+
+#ifdef CONFIG_KVM_MPIC
+
+void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
+int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 cpu);
+void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
+
+#else
+
+static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ return -EINVAL;
+}
+
+static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
+ struct kvm_vcpu *vcpu)
+{
+}
+
+#endif /* CONFIG_KVM_MPIC */
+
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_config_tlb *cfg);
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_dirty_tlb *cfg);
+
+long kvmppc_alloc_lpid(void);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
+static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
+{
+ struct page *page;
+ /*
+ * We can only access pages that the kernel maps
+ * as memory. Bail out for unmapped ones.
+ */
+ if (!pfn_valid(pfn))
+ return;
+
+ /* Clear i-cache for new pages */
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+}
+
+/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness, so expose endian-aware helpers for
+ * all of its fields.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ /* Only Book3S_64 PR supports bi-endian for now */
+ return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+ /* Book3s_64 HV on little endian is always little endian */
+ return false;
+#else
+ return true;
+#endif
+}
+
+#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ return mfspr(bookehv_spr); \
+} \
+
+#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
+{ \
+ mtspr(bookehv_spr, val); \
+} \
+
+#define SHARED_WRAPPER_GET(reg, size) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ else \
+ return le##size##_to_cpu(vcpu->arch.shared->reg); \
+} \
+
+#define SHARED_WRAPPER_SET(reg, size) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ else \
+ vcpu->arch.shared->reg = cpu_to_le##size(val); \
+} \
+
+#define SHARED_WRAPPER(reg, size) \
+ SHARED_WRAPPER_GET(reg, size) \
+ SHARED_WRAPPER_SET(reg, size) \
+
+#define SPRNG_WRAPPER(reg, bookehv_spr) \
+ SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+ SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+
+#ifdef CONFIG_KVM_BOOKE_HV
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
+ SPRNG_WRAPPER(reg, bookehv_spr) \
+
+#else
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
+ SHARED_WRAPPER(reg, size) \
+
+#endif
+
+SHARED_WRAPPER(critical, 64)
+SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
+SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
+SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
+SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
+SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
+SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
+SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->msr = cpu_to_be64(val);
+ else
+ vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
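+
+/*
+ * For example, SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) above generates
+ * kvmppc_get_sprg0()/kvmppc_set_sprg0(): on BookE HV these access
+ * SPRN_GSPRG0 directly, otherwise they use the endian-aware accessors for
+ * vcpu->arch.shared->sprg0.
+ */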
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+ else
+ return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+ else
+ vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
+ * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
+ * irq-disabled tracking state back to normal mode, without actually
+ * enabling interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
+{
+ trace_hardirqs_on();
+
+#ifdef CONFIG_PPC64
+ /*
+ * To avoid races, the caller must have gone directly from having
+ * interrupts fully-enabled to hard-disabled.
+ */
+ WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
+ /* Only need to enable IRQs by hard enabling them after this */
+ local_paca->irq_happened = 0;
+ irq_soft_mask_set(IRQS_ENABLED);
+#endif
+}
+
+static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
+{
+ ulong ea;
+ ulong msr_64bit = 0;
+
+ ea = kvmppc_get_gpr(vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(vcpu, ra);
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+ msr_64bit = MSR_CM;
+#elif defined(CONFIG_PPC_BOOK3S_64)
+ msr_64bit = MSR_SF;
+#endif
+
+ if (!(kvmppc_get_msr(vcpu) & msr_64bit))
+ ea = (uint32_t)ea;
+
+ return ea;
+}
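+
+/*
+ * For an indexed-form access such as lwzx, the above computes
+ * EA = (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits when the guest
+ * MSR indicates 32-bit mode.
+ */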
+
+extern void xics_wake_cpu(int cpu);
+
+#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/libata-portmap.h b/arch/powerpc/include/asm/libata-portmap.h
new file mode 100644
index 000000000..7c602da62
--- /dev/null
+++ b/arch/powerpc/include/asm/libata-portmap.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
+#define __ASM_POWERPC_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 0)
+
+#define ATA_SECONDARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 1)
+
+#endif
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 000000000..b71b9582e
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_LINKAGE_H
+#define _ASM_POWERPC_LINKAGE_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define cond_syscall(x) \
+ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
+ "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
+ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#endif
+
+#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 000000000..d044a1fd4
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#ifdef CONFIG_LIVEPATCH_64
+static inline void klp_init_thread_info(struct task_struct *p)
+{
+ /* + 1 to account for STACK_END_MAGIC */
+ task_thread_info(p)->livepatch_sp = end_of_stack(p) + 1;
+}
+#else
+static inline void klp_init_thread_info(struct task_struct *p) { }
+#endif
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
new file mode 100644
index 000000000..bc4bd19b7
--- /dev/null
+++ b/arch/powerpc/include/asm/local.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_LOCAL_H
+#define _ARCH_POWERPC_LOCAL_H
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+#include <linux/irqflags.h>
+
+#include <asm/hw_irq.h>
+
+typedef struct
+{
+ long v;
+} local_t;
+
+#define LOCAL_INIT(i) { (i) }
+
+static __inline__ long local_read(const local_t *l)
+{
+ return READ_ONCE(l->v);
+}
+
+static __inline__ void local_set(local_t *l, long i)
+{
+ WRITE_ONCE(l->v, i);
+}
+
+#define LOCAL_OP(op, c_op) \
+static __inline__ void local_##op(long i, local_t *l) \
+{ \
+ unsigned long flags; \
+ \
+ powerpc_local_irq_pmu_save(flags); \
+ l->v c_op i; \
+ powerpc_local_irq_pmu_restore(flags); \
+}
+
+#define LOCAL_OP_RETURN(op, c_op) \
+static __inline__ long local_##op##_return(long a, local_t *l) \
+{ \
+ long t; \
+ unsigned long flags; \
+ \
+ powerpc_local_irq_pmu_save(flags); \
+ t = (l->v c_op a); \
+ powerpc_local_irq_pmu_restore(flags); \
+ \
+ return t; \
+}
+
+#define LOCAL_OPS(op, c_op) \
+ LOCAL_OP(op, c_op) \
+ LOCAL_OP_RETURN(op, c_op)
+
+LOCAL_OPS(add, +=)
+LOCAL_OPS(sub, -=)
+
+#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
+#define local_inc_return(l) local_add_return(1LL, l)
+#define local_inc(l) local_inc_return(l)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+#define local_dec_return(l) local_sub_return(1LL, l)
+#define local_dec(l) local_dec_return(l)
+#define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0)
+#define local_dec_and_test(l) (local_dec_return((l)) == 0)
+
+static __inline__ long local_cmpxchg(local_t *l, long o, long n)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ t = l->v;
+ if (t == o)
+ l->v = n;
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+static __inline__ long local_xchg(local_t *l, long n)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ t = l->v;
+ l->v = n;
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static __inline__ int local_add_unless(local_t *l, long a, long u)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ powerpc_local_irq_pmu_save(flags);
+ if (l->v != u) {
+ l->v += a;
+ ret = 1;
+ }
+ powerpc_local_irq_pmu_restore(flags);
+
+ return ret;
+}
+
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
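+
+/*
+ * Typical usage with a per-cpu counter, e.g.:
+ *	static DEFINE_PER_CPU(local_t, events);
+ *	local_inc(this_cpu_ptr(&events));
+ * The operations above stay correct against PMU interrupts because they
+ * wrap the read-modify-write in powerpc_local_irq_pmu_save/restore().
+ */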
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->v++)
+#define __local_dec(l)	((l)->v--)
+#define __local_add(i,l) ((l)->v+=(i))
+#define __local_sub(i,l) ((l)->v-=(i))
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+
+#include <asm-generic/local.h>
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
new file mode 100644
index 000000000..ee1488d38
--- /dev/null
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * lppaca.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ */
+#ifndef _ASM_POWERPC_LPPACA_H
+#define _ASM_POWERPC_LPPACA_H
+
+/*
+ * The below VPHN macros are outside the __KERNEL__ check since these are
+ * used for compiling the vphn selftest in userspace
+ */
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers hold up to 24 packed 16-bit entries (6 * 8 / 2 = 24),
+ * which unpack into up to 24 be32 associativity values. To form the
+ * complete property we also have to add the length in the first cell,
+ * hence the "+ 1".
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+/*
+ * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
+ * 1 for retrieving associativity information for a guest cpu
+ * 2 for retrieving associativity information for a host/hypervisor cpu
+ */
+#define VPHN_FLAG_VCPU 1
+#define VPHN_FLAG_PCPU 2
+
+#ifdef __KERNEL__
+
+/*
+ * These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor and the OS.
+ */
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+
+/*
+ * The lppaca is the "virtual processor area" registered with the hypervisor,
+ * H_REGISTER_VPA etc.
+ *
+ * According to PAPR, the structure is 640 bytes long, must be L1 cache line
+ * aligned, and must not cross a 4kB boundary. Its size field must be at
+ * least 640 bytes (but may be more).
+ *
+ * Pre-v4.14 KVM hypervisors reject the VPA if its size field is smaller than
+ * 1kB, so we dynamically allocate 1kB and advertise size as 1kB, but keep
+ * this structure as the canonical 640 byte size.
+ */
+struct lppaca {
+ /* cacheline 1 contains read-only data */
+
+ __be32 desc; /* Eye catcher 0xD397D781 */
+ __be16 size; /* Size of this struct */
+ u8 reserved1[3];
+ u8 __old_status; /* Old status, including shared proc */
+ u8 reserved3[14];
+ volatile __be32 dyn_hw_node_id; /* Dynamic hardware node id */
+ volatile __be32 dyn_hw_proc_id; /* Dynamic hardware proc id */
+ u8 reserved4[56];
+ volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
+ /* associativity change counters */
+ u8 reserved5[32];
+
+ /* cacheline 2 contains local read-write data */
+
+ u8 reserved6[48];
+ u8 cede_latency_hint;
+ u8 ebb_regs_in_use;
+ u8 reserved7[6];
+ u8 dtl_enable_mask; /* Dispatch Trace Log mask */
+ u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
+ u8 fpregs_in_use;
+ u8 pmcregs_in_use;
+ u8 reserved8[28];
+ __be64 wait_state_cycles; /* Wait cycles for this proc */
+ u8 reserved9[28];
+ __be16 slb_count; /* # of SLBs to maintain */
+ u8 idle; /* Indicate OS is idle */
+ u8 vmxregs_in_use;
+
+ /* cacheline 3 is shared with other processors */
+
+ /*
+ * This is the yield_count. An "odd" value (low bit on) means that
+ * the processor is yielded (either because of an OS yield or a
+ * hypervisor preempt). An even value implies that the processor is
+ * currently executing.
+ * NOTE: Even dedicated processor partitions can yield so this
+ * field cannot be used to determine if we are shared or dedicated.
+ */
+ volatile __be32 yield_count;
+ volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+ volatile __be64 cmo_faults; /* CMO page fault count */
+ volatile __be64 cmo_fault_time; /* CMO page fault time */
+ u8 reserved10[64]; /* [S]PURR expropriated/donated */
+ volatile __be64 enqueue_dispatch_tb; /* Total TB enqueue->dispatch */
+ volatile __be64 ready_enqueue_tb; /* Total TB ready->enqueue */
+ volatile __be64 wait_ready_tb; /* Total TB wait->ready */
+ u8 reserved11[16];
+
+ /* cacheline 4-5 */
+
+ __be32 page_ins; /* CMO Hint - # page ins by OS */
+ u8 reserved12[148];
+ volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
+ u8 reserved13[96];
+} ____cacheline_aligned;
+
+#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
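+
+/*
+ * For example, a virtual processor is currently yielded/preempted when
+ * (be32_to_cpu(lppaca_of(cpu).yield_count) & 1) is set; see the
+ * yield_count description above.
+ */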
+
+/*
+ * We are using a non architected field to determine if a partition is
+ * shared or dedicated. This currently works on both KVM and PHYP, but
+ * we will have to transition to something better.
+ */
+#define LPPACA_OLD_SHARED_PROC 2
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * All CPUs should have the same shared proc value, so directly access the PACA
+ * to avoid false positives from DEBUG_PREEMPT.
+ */
+static inline bool lppaca_shared_proc(void)
+{
+ struct lppaca *l = local_paca->lppaca_ptr;
+
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return false;
+ return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+}
+
+#define get_lppaca() (get_paca()->lppaca_ptr)
+#endif
+
+/*
+ * SLB shadow buffer structure as defined in the PAPR. The save_area
+ * contains adjacent ESID and VSID pairs for each shadowed SLB. The
+ * ESID is stored in the lower 64bits, then the VSID.
+ */
+struct slb_shadow {
+ __be32 persistent; /* Number of persistent SLBs */
+ __be32 buffer_length; /* Total shadow buffer length */
+ __be64 reserved;
+ struct {
+ __be64 esid;
+ __be64 vsid;
+ } save_area[SLB_NUM_BOLTED];
+} ____cacheline_aligned;
+
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif /* CONFIG_PPC_BOOK3S */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
new file mode 100644
index 000000000..b11501b30
--- /dev/null
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PS3 hvcall interface.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ * Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ */
+
+#if !defined(_ASM_POWERPC_LV1CALL_H)
+#define _ASM_POWERPC_LV1CALL_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+#include <linux/export.h>
+
+/* lv1 call declaration macros */
+
+#define LV1_1_IN_ARG_DECL u64 in_1
+#define LV1_2_IN_ARG_DECL LV1_1_IN_ARG_DECL, u64 in_2
+#define LV1_3_IN_ARG_DECL LV1_2_IN_ARG_DECL, u64 in_3
+#define LV1_4_IN_ARG_DECL LV1_3_IN_ARG_DECL, u64 in_4
+#define LV1_5_IN_ARG_DECL LV1_4_IN_ARG_DECL, u64 in_5
+#define LV1_6_IN_ARG_DECL LV1_5_IN_ARG_DECL, u64 in_6
+#define LV1_7_IN_ARG_DECL LV1_6_IN_ARG_DECL, u64 in_7
+#define LV1_8_IN_ARG_DECL LV1_7_IN_ARG_DECL, u64 in_8
+#define LV1_1_OUT_ARG_DECL u64 *out_1
+#define LV1_2_OUT_ARG_DECL LV1_1_OUT_ARG_DECL, u64 *out_2
+#define LV1_3_OUT_ARG_DECL LV1_2_OUT_ARG_DECL, u64 *out_3
+#define LV1_4_OUT_ARG_DECL LV1_3_OUT_ARG_DECL, u64 *out_4
+#define LV1_5_OUT_ARG_DECL LV1_4_OUT_ARG_DECL, u64 *out_5
+#define LV1_6_OUT_ARG_DECL LV1_5_OUT_ARG_DECL, u64 *out_6
+#define LV1_7_OUT_ARG_DECL LV1_6_OUT_ARG_DECL, u64 *out_7
+
+#define LV1_0_IN_0_OUT_ARG_DECL void
+#define LV1_1_IN_0_OUT_ARG_DECL LV1_1_IN_ARG_DECL
+#define LV1_2_IN_0_OUT_ARG_DECL LV1_2_IN_ARG_DECL
+#define LV1_3_IN_0_OUT_ARG_DECL LV1_3_IN_ARG_DECL
+#define LV1_4_IN_0_OUT_ARG_DECL LV1_4_IN_ARG_DECL
+#define LV1_5_IN_0_OUT_ARG_DECL LV1_5_IN_ARG_DECL
+#define LV1_6_IN_0_OUT_ARG_DECL LV1_6_IN_ARG_DECL
+#define LV1_7_IN_0_OUT_ARG_DECL LV1_7_IN_ARG_DECL
+
+#define LV1_0_IN_1_OUT_ARG_DECL LV1_1_OUT_ARG_DECL
+#define LV1_1_IN_1_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_2_IN_1_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_3_IN_1_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_4_IN_1_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_5_IN_1_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_6_IN_1_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_7_IN_1_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_8_IN_1_OUT_ARG_DECL LV1_8_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
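+
+/*
+ * For example, LV1_2_IN_1_OUT_ARG_DECL expands to:
+ *	u64 in_1, u64 in_2, u64 *out_1
+ */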
+
+#define LV1_0_IN_2_OUT_ARG_DECL LV1_2_OUT_ARG_DECL
+#define LV1_1_IN_2_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_2_IN_2_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_3_IN_2_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_4_IN_2_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_5_IN_2_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_6_IN_2_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_7_IN_2_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+
+#define LV1_0_IN_3_OUT_ARG_DECL LV1_3_OUT_ARG_DECL
+#define LV1_1_IN_3_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_2_IN_3_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_3_IN_3_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_4_IN_3_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_5_IN_3_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_6_IN_3_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_7_IN_3_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+
+#define LV1_0_IN_4_OUT_ARG_DECL LV1_4_OUT_ARG_DECL
+#define LV1_1_IN_4_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_2_IN_4_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_3_IN_4_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_4_IN_4_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_5_IN_4_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_6_IN_4_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_7_IN_4_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+
+#define LV1_0_IN_5_OUT_ARG_DECL LV1_5_OUT_ARG_DECL
+#define LV1_1_IN_5_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_2_IN_5_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_3_IN_5_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_4_IN_5_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_5_IN_5_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_6_IN_5_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_7_IN_5_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+
+#define LV1_0_IN_6_OUT_ARG_DECL LV1_6_OUT_ARG_DECL
+#define LV1_1_IN_6_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_2_IN_6_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_3_IN_6_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_4_IN_6_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_5_IN_6_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_6_IN_6_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_7_IN_6_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+
+#define LV1_0_IN_7_OUT_ARG_DECL LV1_7_OUT_ARG_DECL
+#define LV1_1_IN_7_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_2_IN_7_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_3_IN_7_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_4_IN_7_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_5_IN_7_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_6_IN_7_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_7_IN_7_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+
+#define LV1_1_IN_ARGS in_1
+#define LV1_2_IN_ARGS LV1_1_IN_ARGS, in_2
+#define LV1_3_IN_ARGS LV1_2_IN_ARGS, in_3
+#define LV1_4_IN_ARGS LV1_3_IN_ARGS, in_4
+#define LV1_5_IN_ARGS LV1_4_IN_ARGS, in_5
+#define LV1_6_IN_ARGS LV1_5_IN_ARGS, in_6
+#define LV1_7_IN_ARGS LV1_6_IN_ARGS, in_7
+#define LV1_8_IN_ARGS LV1_7_IN_ARGS, in_8
+
+#define LV1_1_OUT_ARGS out_1
+#define LV1_2_OUT_ARGS LV1_1_OUT_ARGS, out_2
+#define LV1_3_OUT_ARGS LV1_2_OUT_ARGS, out_3
+#define LV1_4_OUT_ARGS LV1_3_OUT_ARGS, out_4
+#define LV1_5_OUT_ARGS LV1_4_OUT_ARGS, out_5
+#define LV1_6_OUT_ARGS LV1_5_OUT_ARGS, out_6
+#define LV1_7_OUT_ARGS LV1_6_OUT_ARGS, out_7
+
+#define LV1_0_IN_0_OUT_ARGS
+#define LV1_1_IN_0_OUT_ARGS LV1_1_IN_ARGS
+#define LV1_2_IN_0_OUT_ARGS LV1_2_IN_ARGS
+#define LV1_3_IN_0_OUT_ARGS LV1_3_IN_ARGS
+#define LV1_4_IN_0_OUT_ARGS LV1_4_IN_ARGS
+#define LV1_5_IN_0_OUT_ARGS LV1_5_IN_ARGS
+#define LV1_6_IN_0_OUT_ARGS LV1_6_IN_ARGS
+#define LV1_7_IN_0_OUT_ARGS LV1_7_IN_ARGS
+
+#define LV1_0_IN_1_OUT_ARGS LV1_1_OUT_ARGS
+#define LV1_1_IN_1_OUT_ARGS LV1_1_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_2_IN_1_OUT_ARGS LV1_2_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_3_IN_1_OUT_ARGS LV1_3_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_4_IN_1_OUT_ARGS LV1_4_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_5_IN_1_OUT_ARGS LV1_5_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_6_IN_1_OUT_ARGS LV1_6_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_7_IN_1_OUT_ARGS LV1_7_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_8_IN_1_OUT_ARGS LV1_8_IN_ARGS, LV1_1_OUT_ARGS
+
+#define LV1_0_IN_2_OUT_ARGS LV1_2_OUT_ARGS
+#define LV1_1_IN_2_OUT_ARGS LV1_1_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_2_IN_2_OUT_ARGS LV1_2_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_3_IN_2_OUT_ARGS LV1_3_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_4_IN_2_OUT_ARGS LV1_4_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_5_IN_2_OUT_ARGS LV1_5_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_6_IN_2_OUT_ARGS LV1_6_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_7_IN_2_OUT_ARGS LV1_7_IN_ARGS, LV1_2_OUT_ARGS
+
+#define LV1_0_IN_3_OUT_ARGS LV1_3_OUT_ARGS
+#define LV1_1_IN_3_OUT_ARGS LV1_1_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_2_IN_3_OUT_ARGS LV1_2_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_3_IN_3_OUT_ARGS LV1_3_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_4_IN_3_OUT_ARGS LV1_4_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_5_IN_3_OUT_ARGS LV1_5_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_6_IN_3_OUT_ARGS LV1_6_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_7_IN_3_OUT_ARGS LV1_7_IN_ARGS, LV1_3_OUT_ARGS
+
+#define LV1_0_IN_4_OUT_ARGS LV1_4_OUT_ARGS
+#define LV1_1_IN_4_OUT_ARGS LV1_1_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_2_IN_4_OUT_ARGS LV1_2_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_3_IN_4_OUT_ARGS LV1_3_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_4_IN_4_OUT_ARGS LV1_4_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_5_IN_4_OUT_ARGS LV1_5_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_6_IN_4_OUT_ARGS LV1_6_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_7_IN_4_OUT_ARGS LV1_7_IN_ARGS, LV1_4_OUT_ARGS
+
+#define LV1_0_IN_5_OUT_ARGS LV1_5_OUT_ARGS
+#define LV1_1_IN_5_OUT_ARGS LV1_1_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_2_IN_5_OUT_ARGS LV1_2_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_3_IN_5_OUT_ARGS LV1_3_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_4_IN_5_OUT_ARGS LV1_4_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_5_IN_5_OUT_ARGS LV1_5_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_6_IN_5_OUT_ARGS LV1_6_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_7_IN_5_OUT_ARGS LV1_7_IN_ARGS, LV1_5_OUT_ARGS
+
+#define LV1_0_IN_6_OUT_ARGS LV1_6_OUT_ARGS
+#define LV1_1_IN_6_OUT_ARGS LV1_1_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_2_IN_6_OUT_ARGS LV1_2_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_3_IN_6_OUT_ARGS LV1_3_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_4_IN_6_OUT_ARGS LV1_4_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_5_IN_6_OUT_ARGS LV1_5_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_6_IN_6_OUT_ARGS LV1_6_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_7_IN_6_OUT_ARGS LV1_7_IN_ARGS, LV1_6_OUT_ARGS
+
+#define LV1_0_IN_7_OUT_ARGS LV1_7_OUT_ARGS
+#define LV1_1_IN_7_OUT_ARGS LV1_1_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_2_IN_7_OUT_ARGS LV1_2_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_3_IN_7_OUT_ARGS LV1_3_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_4_IN_7_OUT_ARGS LV1_4_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_5_IN_7_OUT_ARGS LV1_5_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_6_IN_7_OUT_ARGS LV1_6_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_7_IN_7_OUT_ARGS LV1_7_IN_ARGS, LV1_7_OUT_ARGS
+
+/*
+ * This LV1_CALL() macro is for use by callers. It expands into an
+ * inline call wrapper and an underscored HV call declaration. The
+ * wrapper can be used to instrument the lv1 call interface. The
+ * file lv1call.S defines its own LV1_CALL() macro to expand into
+ * the actual underscored call definition.
+ */
+
+#if !defined(LV1_CALL)
+#define LV1_CALL(name, in, out, num) \
+ extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
+ static inline int lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL) \
+ {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);}
+#endif
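+/*
+ * Editorial sketch (not part of the interface): given the macros above,
+ * a table entry such as LV1_CALL(get_rtc, 0, 2, 232) below expands to
+ * roughly:
+ *
+ *	extern s64 _lv1_get_rtc(u64 *out_1, u64 *out_2);
+ *	static inline int lv1_get_rtc(u64 *out_1, u64 *out_2)
+ *	{return _lv1_get_rtc(out_1, out_2);}
+ *
+ * (the call number, 232 here, is only consumed by the assembly
+ * definition in lv1call.S)
+ */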
+
+#endif /* !defined(__ASSEMBLY__) */
+
+/* lv1 call table */
+
+LV1_CALL(allocate_memory, 4, 2, 0 )
+LV1_CALL(write_htab_entry, 4, 0, 1 )
+LV1_CALL(construct_virtual_address_space, 3, 2, 2 )
+LV1_CALL(invalidate_htab_entries, 5, 0, 3 )
+LV1_CALL(get_virtual_address_space_id_of_ppe, 0, 1, 4 )
+LV1_CALL(query_logical_partition_address_region_info, 1, 5, 6 )
+LV1_CALL(select_virtual_address_space, 1, 0, 7 )
+LV1_CALL(pause, 1, 0, 9 )
+LV1_CALL(destruct_virtual_address_space, 1, 0, 10 )
+LV1_CALL(configure_irq_state_bitmap, 3, 0, 11 )
+LV1_CALL(connect_irq_plug_ext, 5, 0, 12 )
+LV1_CALL(release_memory, 1, 0, 13 )
+LV1_CALL(put_iopte, 5, 0, 15 )
+LV1_CALL(disconnect_irq_plug_ext, 3, 0, 17 )
+LV1_CALL(construct_event_receive_port, 0, 1, 18 )
+LV1_CALL(destruct_event_receive_port, 1, 0, 19 )
+LV1_CALL(send_event_locally, 1, 0, 24 )
+LV1_CALL(end_of_interrupt, 1, 0, 27 )
+LV1_CALL(connect_irq_plug, 2, 0, 28 )
+LV1_CALL(disconnect_irq_plug, 1, 0, 29 )
+LV1_CALL(end_of_interrupt_ext, 3, 0, 30 )
+LV1_CALL(did_update_interrupt_mask, 2, 0, 31 )
+LV1_CALL(shutdown_logical_partition, 1, 0, 44 )
+LV1_CALL(destruct_logical_spe, 1, 0, 54 )
+LV1_CALL(construct_logical_spe, 7, 6, 57 )
+LV1_CALL(set_spe_interrupt_mask, 3, 0, 61 )
+LV1_CALL(set_spe_transition_notifier, 3, 0, 64 )
+LV1_CALL(disable_logical_spe, 2, 0, 65 )
+LV1_CALL(clear_spe_interrupt_status, 4, 0, 66 )
+LV1_CALL(get_spe_interrupt_status, 2, 1, 67 )
+LV1_CALL(get_logical_ppe_id, 0, 1, 69 )
+LV1_CALL(set_interrupt_mask, 5, 0, 73 )
+LV1_CALL(get_logical_partition_id, 0, 1, 74 )
+LV1_CALL(configure_execution_time_variable, 1, 0, 77 )
+LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
+LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
+LV1_CALL(create_repository_node, 6, 0, 90 )
+LV1_CALL(read_repository_node, 5, 2, 91 )
+LV1_CALL(write_repository_node, 6, 0, 92 )
+LV1_CALL(delete_repository_node, 4, 0, 93 )
+LV1_CALL(read_htab_entries, 2, 5, 95 )
+LV1_CALL(set_dabr, 2, 0, 96 )
+LV1_CALL(get_total_execution_time, 2, 1, 103 )
+LV1_CALL(allocate_io_segment, 3, 1, 116 )
+LV1_CALL(release_io_segment, 2, 0, 117 )
+LV1_CALL(construct_io_irq_outlet, 1, 1, 120 )
+LV1_CALL(destruct_io_irq_outlet, 1, 0, 121 )
+LV1_CALL(map_htab, 1, 1, 122 )
+LV1_CALL(unmap_htab, 1, 0, 123 )
+LV1_CALL(get_version_info, 0, 2, 127 )
+LV1_CALL(insert_htab_entry, 6, 3, 158 )
+LV1_CALL(read_virtual_uart, 3, 1, 162 )
+LV1_CALL(write_virtual_uart, 3, 1, 163 )
+LV1_CALL(set_virtual_uart_param, 3, 0, 164 )
+LV1_CALL(get_virtual_uart_param, 2, 1, 165 )
+LV1_CALL(configure_virtual_uart_irq, 1, 1, 166 )
+LV1_CALL(open_device, 3, 0, 170 )
+LV1_CALL(close_device, 2, 0, 171 )
+LV1_CALL(map_device_mmio_region, 5, 1, 172 )
+LV1_CALL(unmap_device_mmio_region, 3, 0, 173 )
+LV1_CALL(allocate_device_dma_region, 5, 1, 174 )
+LV1_CALL(free_device_dma_region, 3, 0, 175 )
+LV1_CALL(map_device_dma_region, 6, 0, 176 )
+LV1_CALL(unmap_device_dma_region, 4, 0, 177 )
+LV1_CALL(net_add_multicast_address, 4, 0, 185 )
+LV1_CALL(net_remove_multicast_address, 4, 0, 186 )
+LV1_CALL(net_start_tx_dma, 4, 0, 187 )
+LV1_CALL(net_stop_tx_dma, 2, 0, 188 )
+LV1_CALL(net_start_rx_dma, 4, 0, 189 )
+LV1_CALL(net_stop_rx_dma, 2, 0, 190 )
+LV1_CALL(net_set_interrupt_status_indicator, 4, 0, 191 )
+LV1_CALL(net_set_interrupt_mask, 4, 0, 193 )
+LV1_CALL(net_control, 6, 2, 194 )
+LV1_CALL(connect_interrupt_event_receive_port, 4, 0, 197 )
+LV1_CALL(disconnect_interrupt_event_receive_port, 4, 0, 198 )
+LV1_CALL(get_spe_all_interrupt_statuses, 1, 1, 199 )
+LV1_CALL(deconfigure_virtual_uart_irq, 0, 0, 202 )
+LV1_CALL(enable_logical_spe, 2, 0, 207 )
+LV1_CALL(gpu_open, 1, 0, 210 )
+LV1_CALL(gpu_close, 0, 0, 211 )
+LV1_CALL(gpu_device_map, 1, 2, 212 )
+LV1_CALL(gpu_device_unmap, 1, 0, 213 )
+LV1_CALL(gpu_memory_allocate, 5, 2, 214 )
+LV1_CALL(gpu_memory_free, 1, 0, 216 )
+LV1_CALL(gpu_context_allocate, 2, 5, 217 )
+LV1_CALL(gpu_context_free, 1, 0, 218 )
+LV1_CALL(gpu_context_iomap, 5, 0, 221 )
+LV1_CALL(gpu_context_attribute, 6, 0, 225 )
+LV1_CALL(gpu_context_intr, 1, 1, 227 )
+LV1_CALL(gpu_attribute, 3, 0, 228 )
+LV1_CALL(get_rtc, 0, 2, 232 )
+LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
+LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
+LV1_CALL(stop_ppe_periodic_tracer, 1, 1, 242 )
+LV1_CALL(storage_read, 6, 1, 245 )
+LV1_CALL(storage_write, 6, 1, 246 )
+LV1_CALL(storage_send_device_command, 6, 1, 248 )
+LV1_CALL(storage_get_async_status, 1, 2, 249 )
+LV1_CALL(storage_check_async_status, 2, 1, 254 )
+LV1_CALL(panic, 1, 0, 255 )
+LV1_CALL(construct_lpm, 6, 3, 140 )
+LV1_CALL(destruct_lpm, 1, 0, 141 )
+LV1_CALL(start_lpm, 1, 0, 142 )
+LV1_CALL(stop_lpm, 1, 1, 143 )
+LV1_CALL(copy_lpm_trace_buffer, 3, 1, 144 )
+LV1_CALL(add_lpm_event_bookmark, 5, 0, 145 )
+LV1_CALL(delete_lpm_event_bookmark, 3, 0, 146 )
+LV1_CALL(set_lpm_interrupt_mask, 3, 1, 147 )
+LV1_CALL(get_lpm_interrupt_status, 1, 1, 148 )
+LV1_CALL(set_lpm_general_control, 5, 2, 149 )
+LV1_CALL(set_lpm_interval, 3, 1, 150 )
+LV1_CALL(set_lpm_trigger_control, 3, 1, 151 )
+LV1_CALL(set_lpm_counter_control, 4, 1, 152 )
+LV1_CALL(set_lpm_group_control, 3, 1, 153 )
+LV1_CALL(set_lpm_debug_bus_control, 3, 1, 154 )
+LV1_CALL(set_lpm_counter, 5, 2, 155 )
+LV1_CALL(set_lpm_signal, 7, 0, 156 )
+LV1_CALL(set_lpm_spr_trigger, 2, 0, 157 )
+
+#endif
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
new file mode 100644
index 000000000..378b8d583
--- /dev/null
+++ b/arch/powerpc/include/asm/machdep.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_MACHDEP_H
+#define _ASM_POWERPC_MACHDEP_H
+#ifdef __KERNEL__
+
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+
+struct pt_regs;
+struct pci_bus;
+struct device_node;
+struct iommu_table;
+struct rtc_time;
+struct file;
+struct pci_controller;
+struct kimage;
+struct pci_host_bridge;
+
+struct machdep_calls {
+ char *name;
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PM
+ void (*iommu_restore)(void);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unsigned long (*memory_block_size)(void);
+#endif
+#endif /* CONFIG_PPC64 */
+
+ void (*dma_set_mask)(struct device *dev, u64 dma_mask);
+
+ int (*probe)(void);
+ void (*setup_arch)(void); /* Optional, may be NULL */
+ /* Optional, may be NULL. */
+ void (*show_cpuinfo)(struct seq_file *m);
+ /* Returns the current operating frequency of "cpu" in Hz */
+ unsigned long (*get_proc_freq)(unsigned int cpu);
+
+ void (*init_IRQ)(void);
+
+ /* Return an irq, or 0 to indicate there are none pending. */
+ unsigned int (*get_irq)(void);
+
+ /* PCI stuff */
+ /* Called after allocating resources */
+ void (*pcibios_fixup)(void);
+ void (*pci_irq_fixup)(struct pci_dev *dev);
+ int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ *bridge);
+
+ /* finds all the pci_controllers present at boot */
+ void (*discover_phbs)(void);
+
+ /* To setup PHBs when using automatic OF platform driver for PCI */
+ int (*pci_setup_phb)(struct pci_controller *host);
+
+ void __noreturn (*restart)(char *cmd);
+ void __noreturn (*halt)(void);
+ void (*panic)(char *str);
+
+ long (*time_init)(void); /* Optional, may be NULL */
+
+ int (*set_rtc_time)(struct rtc_time *);
+ void (*get_rtc_time)(struct rtc_time *);
+ time64_t (*get_boot_time)(void);
+
+ void (*calibrate_decr)(void);
+
+ void (*progress)(char *, unsigned short);
+
+ /* Interface for platform error logging */
+ void (*log_error)(char *buf, unsigned int err_type, int fatal);
+
+ unsigned char (*nvram_read_val)(int addr);
+ void (*nvram_write_val)(int addr, unsigned char val);
+ ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_size)(void);
+ void (*nvram_sync)(void);
+
+ /* Exception handlers */
+ int (*system_reset_exception)(struct pt_regs *regs);
+ int (*machine_check_exception)(struct pt_regs *regs);
+ int (*handle_hmi_exception)(struct pt_regs *regs);
+
+ /* Early exception handlers called in realmode */
+ int (*hmi_exception_early)(struct pt_regs *regs);
+ long (*machine_check_early)(struct pt_regs *regs);
+
+ /* Called during a machine check exception to retrieve the fixup address. */
+ bool (*mce_check_early_recovery)(struct pt_regs *regs);
+
+ void (*machine_check_log_err)(void);
+
+ /* Motherboard/chipset features. This is a kind of general-purpose
+ * hook used to control some machine-specific features (like reset
+ * lines, chip power control, etc.).
+ */
+ long (*feature_call)(unsigned int feature, ...);
+
+ /* Get legacy PCI/IDE interrupt mapping */
+ int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
+
+ /* Get access protection for /dev/mem */
+ pgprot_t (*phys_mem_access_prot)(struct file *file,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t vma_prot);
+
+ /*
+ * Function for waiting for work with reduced power in idle loop;
+ * called with interrupts disabled.
+ */
+ void (*power_save)(void);
+
+ /* Function to enable performance monitor counters for this
+ platform, called once per cpu. */
+ void (*enable_pmcs)(void);
+
+ /* Set DABR for this platform, leave empty for default implementation */
+ int (*set_dabr)(unsigned long dabr,
+ unsigned long dabrx);
+
+ /* Set DAWR for this platform, leave empty for default implementation */
+ int (*set_dawr)(int nr, unsigned long dawr,
+ unsigned long dawrx);
+
+#ifdef CONFIG_PPC32 /* XXX for now */
+ /* A general init function, called by ppc_init in init/main.c.
+ May be NULL. */
+ void (*init)(void);
+
+ /*
+ * optional PCI "hooks"
+ */
+ /* Called at the very end of pcibios_init() */
+ void (*pcibios_after_init)(void);
+
+#endif /* CONFIG_PPC32 */
+
+ /* Called in indirect_* to avoid touching devices */
+ int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+
+ /* Called after PPC generic resource fixup to perform
+ machine specific fixups */
+ void (*pcibios_fixup_resources)(struct pci_dev *);
+
+ /* Called for each PCI bus in the system when it's probed */
+ void (*pcibios_fixup_bus)(struct pci_bus *);
+
+ /* Called after scan and before resource survey */
+ void (*pcibios_fixup_phb)(struct pci_controller *hose);
+
+ /*
+ * Called after device has been added to bus and
+ * before sysfs has been created.
+ */
+ void (*pcibios_bus_add_device)(struct pci_dev *pdev);
+
+ resource_size_t (*pcibios_default_alignment)(void);
+
+#ifdef CONFIG_PCI_IOV
+ void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
+ resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
+ int (*pcibios_sriov_enable)(struct pci_dev *pdev, u16 num_vfs);
+ int (*pcibios_sriov_disable)(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
+
+ /* Called to shutdown machine specific hardware not already controlled
+ * by other drivers.
+ */
+ void (*machine_shutdown)(void);
+
+#ifdef CONFIG_KEXEC_CORE
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
+
+ /* Called to perform the _real_ kexec.
+ * Do NOT allocate memory or fail here. We are past the point of
+ * no return.
+ */
+ void (*machine_kexec)(struct kimage *image);
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_SUSPEND
+ /* These are called to disable and enable, respectively, IRQs when
+ * entering a suspend state. If NULL, then the generic versions
+ * will be called. The generic versions disable/enable the
+ * decrementer along with interrupts.
+ */
+ void (*suspend_disable_irqs)(void);
+ void (*suspend_enable_irqs)(void);
+#endif
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ ssize_t (*cpu_probe)(const char *, size_t);
+ ssize_t (*cpu_release)(const char *, size_t);
+#endif
+
+ int (*get_random_seed)(unsigned long *v);
+};
+
+extern void e500_idle(void);
+extern void power4_idle(void);
+extern void ppc6xx_idle(void);
+
+/*
+ * ppc_md contains a copy of the machine description structure for the
+ * current platform. machine_id contains the initial address where the
+ * description was found during boot.
+ */
+extern struct machdep_calls ppc_md;
+extern struct machdep_calls *machine_id;
+
+#define __machine_desc __section(".machine.desc")
+
+#define define_machine(name) \
+ extern struct machdep_calls mach_##name; \
+ EXPORT_SYMBOL(mach_##name); \
+ struct machdep_calls mach_##name __machine_desc =
+
+#define machine_is(name) \
+ ({ \
+ extern struct machdep_calls mach_##name \
+ __attribute__((weak)); \
+ machine_id == &mach_##name; \
+ })
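+/*
+ * Usage sketch (editorial; "myboard" and its callbacks are hypothetical,
+ * not part of this header): a platform provides its machdep_calls via
+ * define_machine(), and code can later test for it with machine_is():
+ *
+ *	define_machine(myboard) {
+ *		.name       = "MyBoard",
+ *		.probe      = myboard_probe,
+ *		.setup_arch = myboard_setup_arch,
+ *	};
+ *
+ *	if (machine_is(myboard))
+ *		...
+ */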
+
+static inline void log_error(char *buf, unsigned int err_type, int fatal)
+{
+ if (ppc_md.log_error)
+ ppc_md.log_error(buf, err_type, fatal);
+}
+
+#define __define_machine_initcall(mach, fn, id) \
+ static int __init __machine_initcall_##mach##_##fn(void) { \
+ if (machine_is(mach)) return fn(); \
+ return 0; \
+ } \
+ __define_initcall(__machine_initcall_##mach##_##fn, id);
+
+#define machine_early_initcall(mach, fn) __define_machine_initcall(mach, fn, early)
+#define machine_core_initcall(mach, fn) __define_machine_initcall(mach, fn, 1)
+#define machine_core_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 1s)
+#define machine_postcore_initcall(mach, fn) __define_machine_initcall(mach, fn, 2)
+#define machine_postcore_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 2s)
+#define machine_arch_initcall(mach, fn) __define_machine_initcall(mach, fn, 3)
+#define machine_arch_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 3s)
+#define machine_subsys_initcall(mach, fn) __define_machine_initcall(mach, fn, 4)
+#define machine_subsys_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 4s)
+#define machine_fs_initcall(mach, fn) __define_machine_initcall(mach, fn, 5)
+#define machine_fs_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 5s)
+#define machine_rootfs_initcall(mach, fn) __define_machine_initcall(mach, fn, rootfs)
+#define machine_device_initcall(mach, fn) __define_machine_initcall(mach, fn, 6)
+#define machine_device_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 6s)
+#define machine_late_initcall(mach, fn) __define_machine_initcall(mach, fn, 7)
+#define machine_late_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 7s)
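+/*
+ * Example (editorial sketch, reusing the hypothetical "myboard" platform
+ * above): an initcall that only runs when the kernel booted on that machine:
+ *
+ *	static int __init myboard_late_setup(void)
+ *	{
+ *		return 0;
+ *	}
+ *	machine_late_initcall(myboard, myboard_late_setup);
+ */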
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MACHDEP_H */
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
new file mode 100644
index 000000000..ff5fd82d9
--- /dev/null
+++ b/arch/powerpc/include/asm/macio.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MACIO_ASIC_H__
+#define __MACIO_ASIC_H__
+#ifdef __KERNEL__
+
+#include <linux/of_device.h>
+
+extern struct bus_type macio_bus_type;
+
+/* MacIO device driver is defined later */
+struct macio_driver;
+struct macio_chip;
+
+#define MACIO_DEV_COUNT_RESOURCES 8
+#define MACIO_DEV_COUNT_IRQS 8
+
+/*
+ * the macio_bus structure is used to describe a "virtual" bus
+ * within a MacIO ASIC. It's typically provided by a macio_pci_asic
+ * PCI device, but could be provided differently as well (nubus
+ * machines using a fake OF tree).
+ *
+ * The pdev field can be NULL on non-PCI machines
+ */
+struct macio_bus
+{
+ struct macio_chip *chip; /* macio_chip (private use) */
+ int index; /* macio chip index in system */
+#ifdef CONFIG_PCI
+ struct pci_dev *pdev; /* PCI device hosting this bus */
+#endif
+};
+
+/*
+ * the macio_dev structure is used to describe a device
+ * within an Apple MacIO ASIC.
+ */
+struct macio_dev
+{
+ struct macio_bus *bus; /* macio bus this device is on */
+ struct macio_dev *media_bay; /* Device is part of a media bay */
+ struct platform_device ofdev;
+ struct device_dma_parameters dma_parms; /* ide needs that */
+ int n_resources;
+ struct resource resource[MACIO_DEV_COUNT_RESOURCES];
+ int n_interrupts;
+ struct resource interrupt[MACIO_DEV_COUNT_IRQS];
+};
+#define to_macio_device(d) container_of(d, struct macio_dev, ofdev.dev)
+#define of_to_macio_device(d) container_of(d, struct macio_dev, ofdev)
+
+extern struct macio_dev *macio_dev_get(struct macio_dev *dev);
+extern void macio_dev_put(struct macio_dev *dev);
+
+/*
+ * Accessors to resources & interrupts and other device
+ * fields
+ */
+
+static inline int macio_resource_count(struct macio_dev *dev)
+{
+ return dev->n_resources;
+}
+
+static inline unsigned long macio_resource_start(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].start;
+}
+
+static inline unsigned long macio_resource_end(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].end;
+}
+
+static inline unsigned long macio_resource_len(struct macio_dev *dev, int resource_no)
+{
+ struct resource *res = &dev->resource[resource_no];
+ if (res->start == 0 || res->end == 0 || res->end < res->start)
+ return 0;
+ return resource_size(res);
+}
+
+extern int macio_enable_devres(struct macio_dev *dev);
+
+extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
+extern void macio_release_resource(struct macio_dev *dev, int resource_no);
+extern int macio_request_resources(struct macio_dev *dev, const char *name);
+extern void macio_release_resources(struct macio_dev *dev);
+
+static inline int macio_irq_count(struct macio_dev *dev)
+{
+ return dev->n_interrupts;
+}
+
+static inline int macio_irq(struct macio_dev *dev, int irq_no)
+{
+ return dev->interrupt[irq_no].start;
+}
+
+static inline void macio_set_drvdata(struct macio_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->ofdev.dev, data);
+}
+
+static inline void* macio_get_drvdata(struct macio_dev *dev)
+{
+ return dev_get_drvdata(&dev->ofdev.dev);
+}
+
+static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
+{
+ return mdev->ofdev.dev.of_node;
+}
+
+#ifdef CONFIG_PCI
+static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
+{
+ return mdev->bus->pdev;
+}
+#endif
+
+/*
+ * A driver for a mac-io chip based device
+ */
+struct macio_driver
+{
+ int (*probe)(struct macio_dev* dev, const struct of_device_id *match);
+ int (*remove)(struct macio_dev* dev);
+
+ int (*suspend)(struct macio_dev* dev, pm_message_t state);
+ int (*resume)(struct macio_dev* dev);
+ int (*shutdown)(struct macio_dev* dev);
+
+#ifdef CONFIG_PMAC_MEDIABAY
+ void (*mediabay_event)(struct macio_dev* dev, int mb_state);
+#endif
+ struct device_driver driver;
+};
+#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
+
+extern int macio_register_driver(struct macio_driver *);
+extern void macio_unregister_driver(struct macio_driver *);
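+/*
+ * Registration sketch (editorial; "mydrv" and its probe are hypothetical):
+ * a driver fills in struct macio_driver and registers it on the macio bus,
+ * typically from its module init function:
+ *
+ *	static int mydrv_probe(struct macio_dev *mdev,
+ *			       const struct of_device_id *match)
+ *	{
+ *		if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1)
+ *			return -ENODEV;
+ *		return 0;
+ *	}
+ *
+ *	static struct macio_driver mydrv = {
+ *		.probe  = mydrv_probe,
+ *		.driver = { .name = "mydrv" },
+ *	};
+ *
+ *	... macio_register_driver(&mydrv); ...
+ */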
+
+#endif /* __KERNEL__ */
+#endif /* __MACIO_ASIC_H__ */
diff --git a/arch/powerpc/include/asm/mc146818rtc.h b/arch/powerpc/include/asm/mc146818rtc.h
new file mode 100644
index 000000000..d9e4ecd41
--- /dev/null
+++ b/arch/powerpc/include/asm/mc146818rtc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_MC146818RTC_H
+#define _ASM_POWERPC_MC146818RTC_H
+
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC values are always in BCD */
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
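+/*
+ * Example (editorial sketch): reading the MC146818 seconds register
+ * (index 0x00) through the accessors above; with RTC_ALWAYS_BCD the
+ * value still needs a bcd2bin() conversion:
+ *
+ *	unsigned char sec = bcd2bin(CMOS_READ(0x00));
+ */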
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MC146818RTC_H */
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 000000000..c9f0936bd
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Machine check exception header file.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+enum MCE_Version {
+ MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+ MCE_SEV_NO_ERROR = 0,
+ MCE_SEV_WARNING = 1,
+ MCE_SEV_SEVERE = 2,
+ MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+ MCE_DISPOSITION_RECOVERED = 0,
+ MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+ MCE_INITIATOR_UNKNOWN = 0,
+ MCE_INITIATOR_CPU = 1,
+ MCE_INITIATOR_PCI = 2,
+ MCE_INITIATOR_ISA = 3,
+ MCE_INITIATOR_MEMORY = 4,
+ MCE_INITIATOR_POWERMGM = 5,
+};
+
+enum MCE_ErrorType {
+ MCE_ERROR_TYPE_UNKNOWN = 0,
+ MCE_ERROR_TYPE_UE = 1,
+ MCE_ERROR_TYPE_SLB = 2,
+ MCE_ERROR_TYPE_ERAT = 3,
+ MCE_ERROR_TYPE_TLB = 4,
+ MCE_ERROR_TYPE_USER = 5,
+ MCE_ERROR_TYPE_RA = 6,
+ MCE_ERROR_TYPE_LINK = 7,
+ MCE_ERROR_TYPE_DCACHE = 8,
+ MCE_ERROR_TYPE_ICACHE = 9,
+};
+
+enum MCE_ErrorClass {
+ MCE_ECLASS_UNKNOWN = 0,
+ MCE_ECLASS_HARDWARE,
+ MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_ECLASS_SOFTWARE,
+ MCE_ECLASS_SOFT_INDETERMINATE,
+};
+
+enum MCE_UeErrorType {
+ MCE_UE_ERROR_INDETERMINATE = 0,
+ MCE_UE_ERROR_IFETCH = 1,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_UE_ERROR_LOAD_STORE = 3,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+ MCE_SLB_ERROR_INDETERMINATE = 0,
+ MCE_SLB_ERROR_PARITY = 1,
+ MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+ MCE_ERAT_ERROR_INDETERMINATE = 0,
+ MCE_ERAT_ERROR_PARITY = 1,
+ MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+ MCE_TLB_ERROR_INDETERMINATE = 0,
+ MCE_TLB_ERROR_PARITY = 1,
+ MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_UserErrorType {
+ MCE_USER_ERROR_INDETERMINATE = 0,
+ MCE_USER_ERROR_TLBIE = 1,
+ MCE_USER_ERROR_SCV = 2,
+};
+
+enum MCE_RaErrorType {
+ MCE_RA_ERROR_INDETERMINATE = 0,
+ MCE_RA_ERROR_IFETCH = 1,
+ MCE_RA_ERROR_IFETCH_FOREIGN = 2,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 3,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 4,
+ MCE_RA_ERROR_LOAD = 5,
+ MCE_RA_ERROR_STORE = 6,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 7,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 8,
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN = 9,
+};
+
+enum MCE_LinkErrorType {
+ MCE_LINK_ERROR_INDETERMINATE = 0,
+ MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+ MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+ MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
+struct machine_check_event {
+ enum MCE_Version version:8;
+ u8 in_use;
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
+ enum MCE_ErrorType error_type:8;
+ enum MCE_ErrorClass error_class:8;
+ enum MCE_Disposition disposition:8;
+ bool sync_error;
+ u16 cpu;
+ u64 gpr3;
+ u64 srr0;
+ u64 srr1;
+ union {
+ struct {
+ enum MCE_UeErrorType ue_error_type:8;
+ u8 effective_address_provided;
+ u8 physical_address_provided;
+ u8 ignore_event;
+ u8 reserved_1[4];
+ u64 effective_address;
+ u64 physical_address;
+ u8 reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum MCE_SlbErrorType slb_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum MCE_EratErrorType erat_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum MCE_TlbErrorType tlb_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } tlb_error;
+
+ struct {
+ enum MCE_UserErrorType user_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } user_error;
+
+ struct {
+ enum MCE_RaErrorType ra_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } ra_error;
+
+ struct {
+ enum MCE_LinkErrorType link_error_type:8;
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
+ } link_error;
+ } u;
+};
+
+struct mce_error_info {
+ enum MCE_ErrorType error_type:8;
+ union {
+ enum MCE_UeErrorType ue_error_type:8;
+ enum MCE_SlbErrorType slb_error_type:8;
+ enum MCE_EratErrorType erat_error_type:8;
+ enum MCE_TlbErrorType tlb_error_type:8;
+ enum MCE_UserErrorType user_error_type:8;
+ enum MCE_RaErrorType ra_error_type:8;
+ enum MCE_LinkErrorType link_error_type:8;
+ } u;
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
+ enum MCE_ErrorClass error_class:8;
+ bool sync_error;
+ bool ignore_event;
+};
+
+#define MAX_MC_EVT 10
+
+struct mce_info {
+ int mce_nest_count;
+ struct machine_check_event mce_event[MAX_MC_EVT];
+ /* Queue for delayed MCE events. */
+ int mce_queue_count;
+ struct machine_check_event mce_event_queue[MAX_MC_EVT];
+ /* Queue for delayed MCE UE events. */
+ int mce_ue_count;
+ struct machine_check_event mce_ue_event_queue[MAX_MC_EVT];
+};
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE true
+#define MCE_EVENT_DONTRELEASE false
+
+struct pt_regs;
+struct notifier_block;
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err, uint64_t nip,
+ uint64_t addr, uint64_t phys_addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt,
+ bool user_mode, bool in_guest);
+unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
+extern void mce_common_process_ue(struct pt_regs *regs,
+ struct mce_error_info *mce_err);
+void mce_irq_work_queue(void);
+int mce_register_notifier(struct notifier_block *nb);
+int mce_unregister_notifier(struct notifier_block *nb);
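+/*
+ * Consumption sketch (editorial): a handler can drain pending events with
+ * get_mce_event(), passing MCE_EVENT_RELEASE so each slot is freed as it
+ * is consumed:
+ *
+ *	struct machine_check_event evt;
+ *
+ *	while (get_mce_event(&evt, MCE_EVENT_RELEASE))
+ *		machine_check_print_event_info(&evt, false, false);
+ */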
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_run_irq_context_handlers(void);
+#else
+static inline void mce_run_irq_context_handlers(void) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void set_mce_pending_irq_work(void);
+void clear_mce_pending_irq_work(void);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void flush_and_reload_slb(void);
+void flush_erat(void);
+long __machine_check_early_realmode_p7(struct pt_regs *regs);
+long __machine_check_early_realmode_p8(struct pt_regs *regs);
+long __machine_check_early_realmode_p9(struct pt_regs *regs);
+long __machine_check_early_realmode_p10(struct pt_regs *regs);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_init(void);
+#else
+static inline void mce_init(void) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
new file mode 100644
index 000000000..230fda470
--- /dev/null
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mediabay.h: definitions for using the media bay
+ * on PowerBook 3400 and similar computers.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+#ifndef _PPC_MEDIABAY_H
+#define _PPC_MEDIABAY_H
+
+#ifdef __KERNEL__
+
+#define MB_FD 0 /* media bay contains floppy drive (automatic eject ?) */
+#define MB_FD1 1 /* media bay contains floppy drive (manual eject ?) */
+#define MB_SOUND 2 /* sound device ? */
+#define MB_CD 3 /* media bay contains ATA drive such as CD or ZIP */
+#define MB_PCI 5 /* media bay contains a PCI device */
+#define MB_POWER 6 /* media bay contains a Power device (???) */
+#define MB_NO 7 /* media bay contains nothing */
+
+struct macio_dev;
+
+#ifdef CONFIG_PMAC_MEDIABAY
+
+/* Check the content type of the bay; returns MB_NO if the bay is still
+ * transitioning
+ */
+extern int check_media_bay(struct macio_dev *bay);
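+/*
+ * Example (editorial sketch): a caller polls until the bay has settled,
+ * then acts on its contents:
+ *
+ *	if (check_media_bay(bay) == MB_CD)
+ *		... an ATA device is present ...
+ */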
+
+/* The ATA driver uses the calls below to temporarily hold off the
+ * media bay callbacks while initializing the interface
+ */
+extern void lock_media_bay(struct macio_dev *bay);
+extern void unlock_media_bay(struct macio_dev *bay);
+
+#else
+
+static inline int check_media_bay(struct macio_dev *bay)
+{
+ return MB_NO;
+}
+
+static inline void lock_media_bay(struct macio_dev *bay) { }
+static inline void unlock_media_bay(struct macio_dev *bay) { }
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_MEDIABAY_H */
diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
new file mode 100644
index 000000000..2f26b8fc8
--- /dev/null
+++ b/arch/powerpc/include/asm/mem_encrypt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_MEM_ENCRYPT_H
+#define _ASM_POWERPC_MEM_ENCRYPT_H
+
+#include <asm/svm.h>
+
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return is_secure_guest();
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* _ASM_POWERPC_MEM_ENCRYPT_H */
diff --git a/arch/powerpc/include/asm/membarrier.h b/arch/powerpc/include/asm/membarrier.h
new file mode 100644
index 000000000..de7f79157
--- /dev/null
+++ b/arch/powerpc/include/asm/membarrier.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_POWERPC_MEMBARRIER_H
+#define _ASM_POWERPC_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ /*
+ * Only need the full barrier when switching between processes.
+ * Barrier when switching from kernel to userspace is not
+ * required here, given that it is implied by mmdrop(). Barrier
+ * when switching from userspace to kernel is not needed after
+ * the store to rq->curr.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
+ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+ return;
+
+ /*
+ * The membarrier system call requires a full memory barrier
+ * after storing to rq->curr, before going back to user-space.
+ */
+ smp_mb();
+}
+
+#endif /* _ASM_POWERPC_MEMBARRIER_H */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
new file mode 100644
index 000000000..17a77d47e
--- /dev/null
+++ b/arch/powerpc/include/asm/mman.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_MMAN_H
+#define _ASM_POWERPC_MMAN_H
+
+#include <uapi/asm/mman.h>
+
+#ifdef CONFIG_PPC64
+
+#include <asm/cputable.h>
+#include <linux/mm.h>
+#include <linux/pkeys.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/firmware.h>
+
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ unsigned long pkey)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+ return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
+}
+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
+ return true;
+}
+#define arch_validate_prot arch_validate_prot
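+/*
+ * Example (editorial sketch): arch_validate_prot() is what makes a
+ * userspace request such as
+ *
+ *	mprotect(addr, len, PROT_READ | PROT_WRITE | PROT_SAO);
+ *
+ * fail on CPUs without CPU_FTR_SAO.
+ */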
+
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/include/asm/mmiowb.h b/arch/powerpc/include/asm/mmiowb.h
new file mode 100644
index 000000000..74a00127e
--- /dev/null
+++ b/arch/powerpc/include/asm/mmiowb.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMIOWB_H
+#define _ASM_POWERPC_MMIOWB_H
+
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/paca.h>
+
+#define arch_mmiowb_state() (&local_paca->mmiowb_state)
+#define mmiowb() mb()
+
+#endif /* CONFIG_MMIOWB */
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_POWERPC_MMIOWB_H */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
new file mode 100644
index 000000000..94b981152
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+#include <asm/asm-const.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * MMU families
+ */
+#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
+#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
+
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX ASM_CONST(0x00000040)
+
+/*
+ * Individual features below.
+ */
+
+/*
+ * Supports the KUAP feature:
+ * key 0 controls userspace addresses on radix,
+ * key 3 on hash.
+ */
+#define MMU_FTR_BOOK3S_KUAP ASM_CONST(0x00000200)
+
+/*
+ * Supports the KUEP feature:
+ * key 0 controls userspace addresses on radix,
+ * key 3 on hash.
+ */
+#define MMU_FTR_BOOK3S_KUEP ASM_CONST(0x00000400)
+
+/*
+ * Support for memory protection keys.
+ */
+#define MMU_FTR_PKEY ASM_CONST(0x00000800)
+
+/* Guest Translation Shootdown Enable */
+#define MMU_FTR_GTSE ASM_CONST(0x00001000)
+
+/*
+ * Support for 68-bit VA space, added in ISA 2.05.
+ */
+#define MMU_FTR_68_BIT_VA ASM_CONST(0x00002000)
+/*
+ * Kernel read-only support.
+ * The ppp value 0b110 was added in ISA 2.04.
+ */
+#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
+
+/*
+ * We need to clear the top 16 bits of the VA (of the remaining 64 bits)
+ * in tlbie* instructions.
+ */
+#define MMU_FTR_TLBIE_CROP_VA ASM_CONST(0x00008000)
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on a 32-bit processor. Currently only
+ * used by CONFIG_PPC_BOOK3S_32, as BookE has supported this from day 1.
+ */
+#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate instructions.
+ */
+#define MMU_FTR_USE_TLBILX ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
+
+/* This indicates that the processor doesn't handle way selection
+ * properly and needs SW to track and update the LRU state. This
+ * is specific to an erratum on e300c2/c3/c4 class parts
+ */
+#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
+
+/* Doesn't support the B bit (1T segment) in SLBIE
+ */
+#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x02000000)
+
+/* Support 16M large pages
+ */
+#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000)
+
+/* Supports TLBIEL variant
+ */
+#define MMU_FTR_TLBIEL ASM_CONST(0x08000000)
+
+/* Supports tlbies w/o locking
+ */
+#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000)
+
+/* Large pages can be marked CI
+ */
+#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000)
+
+/* 1T segments available
+ */
+#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
+
+// NX paste RMA reject in DSI
+#define MMU_FTR_NX_DSI ASM_CONST(0x80000000)
+
+/* MMU feature bit sets for various CPUs */
+#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
+#define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2
+#define MMU_FTRS_PPC970 MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
+#define MMU_FTRS_POWER5 MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER10 MMU_FTRS_POWER6
+#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE
+#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
+#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <asm/cputable.h>
+#include <asm/page.h>
+
+typedef pte_t *pgtable_t;
+
+#ifdef CONFIG_PPC_E500
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+#endif
+
+enum {
+ MMU_FTRS_POSSIBLE =
+#if defined(CONFIG_PPC_BOOK3S_604)
+ MMU_FTR_HPTE_TABLE |
+#endif
+#ifdef CONFIG_PPC_8xx
+ MMU_FTR_TYPE_8xx |
+#endif
+#ifdef CONFIG_40x
+ MMU_FTR_TYPE_40x |
+#endif
+#ifdef CONFIG_PPC_47x
+ MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
+#elif defined(CONFIG_44x)
+ MMU_FTR_TYPE_44x |
+#endif
+#ifdef CONFIG_PPC_E500
+ MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+ MMU_FTR_USE_HIGH_BATS |
+#endif
+#ifdef CONFIG_PPC_83xx
+ MMU_FTR_NEED_DTLB_SW_LRU |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ MMU_FTR_KERNEL_RO |
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
+ MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
+ MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
+ MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
+#endif
+#ifdef CONFIG_PPC_RADIX_MMU
+ MMU_FTR_TYPE_RADIX |
+ MMU_FTR_GTSE | MMU_FTR_NX_DSI |
+#endif /* CONFIG_PPC_RADIX_MMU */
+#endif
+#ifdef CONFIG_PPC_KUAP
+ MMU_FTR_BOOK3S_KUAP |
+#endif /* CONFIG_PPC_KUAP */
+#ifdef CONFIG_PPC_MEM_KEYS
+ MMU_FTR_PKEY |
+#endif
+#ifdef CONFIG_PPC_KUEP
+ MMU_FTR_BOOK3S_KUEP |
+#endif /* CONFIG_PPC_KUEP */
+
+ 0,
+};
+
+#if defined(CONFIG_PPC_BOOK3S_604) && !defined(CONFIG_PPC_BOOK3S_603)
+#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
+#endif
+#ifdef CONFIG_PPC_8xx
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_8xx
+#endif
+#ifdef CONFIG_40x
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_40x
+#endif
+#ifdef CONFIG_PPC_47x
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_47x
+#elif defined(CONFIG_44x)
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
+#endif
+#ifdef CONFIG_PPC_E500
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
+#endif
+
+/* BOOK3S_64 options */
+#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_RADIX
+#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
+#endif
+
+#ifndef MMU_FTRS_ALWAYS
+#define MMU_FTRS_ALWAYS 0
+#endif
+
+static __always_inline bool early_mmu_has_feature(unsigned long feature)
+{
+ if (MMU_FTRS_ALWAYS & feature)
+ return true;
+
+ return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS 32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+ int i;
+
+#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+ printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+ dump_stack();
+ return early_mmu_has_feature(feature);
+ }
+#endif
+
+ if (MMU_FTRS_ALWAYS & feature)
+ return true;
+
+ if (!(MMU_FTRS_POSSIBLE & feature))
+ return false;
+
+ i = __builtin_ctzl(feature);
+ return static_branch_likely(&mmu_feature_keys[i]);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ int i;
+
+ i = __builtin_ctzl(feature);
+ cur_cpu_spec->mmu_features &= ~feature;
+ static_branch_disable(&mmu_feature_keys[i]);
+}
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
+}
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+ return early_mmu_has_feature(feature);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ cur_cpu_spec->mmu_features &= ~feature;
+}
+#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
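+/*
+ * Example (editorial sketch): callers pass a single compile-time-constant
+ * feature bit, which the jump-label variant turns into a static branch:
+ *
+ *	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ *		...use 1T segments...
+ */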
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server; on embedded, we
+ * make it match the size of our bolted TLB area.
+ */
+extern u64 ppc64_rma_size;
+
+/* Cleanup function used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ unsigned long dw1, bool flush);
+#endif /* CONFIG_PPC64 */
+
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
+static __always_inline bool radix_enabled(void)
+{
+ return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+static __always_inline bool early_radix_enabled(void)
+{
+ return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static inline bool strict_kernel_rwx_enabled(void)
+{
+ return rodata_enabled;
+}
+#else
+static inline bool strict_kernel_rwx_enabled(void)
+{
+ return false;
+}
+#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
+#endif /* !__ASSEMBLY__ */
+
+/* The kernel uses the constants below to index into the page sizes array.
+ * Using fixed constants for this purpose is better for the performance
+ * of the low-level hash refill handlers.
+ *
+ * An unsupported page size has its "shift" field set to 0.
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable.
+ *
+ * Note: This array ended up being a false good idea as it's growing to the
+ * point where I wonder if we should replace it with something different.
+ * Something to think about; feedback welcome. --BenH.
+ */
+
+/* These are #defines as they have to be used in assembly */
+#define MMU_PAGE_4K 0
+#define MMU_PAGE_16K 1
+#define MMU_PAGE_64K 2
+#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
+#define MMU_PAGE_256K 4
+#define MMU_PAGE_512K 5
+#define MMU_PAGE_1M 6
+#define MMU_PAGE_2M 7
+#define MMU_PAGE_4M 8
+#define MMU_PAGE_8M 9
+#define MMU_PAGE_16M 10
+#define MMU_PAGE_64M 11
+#define MMU_PAGE_256M 12
+#define MMU_PAGE_1G 13
+#define MMU_PAGE_16G 14
+#define MMU_PAGE_64G 15
+
+/*
+ * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
+ * Also we need to change the type of mm_context.low/high_slices_psize.
+ */
+#define MMU_PAGE_COUNT 16
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/mmu.h>
+#else /* CONFIG_PPC_BOOK3S_64 */
+
+#ifndef __ASSEMBLY__
+/* MMU initialization */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
+
+static inline void pkey_early_init_devtree(void) {}
+
+extern void *abatron_pteptrs[2];
+#endif /* __ASSEMBLY__ */
+#endif
+
+#if defined(CONFIG_PPC_BOOK3S_32)
+/* 32-bit classic hash table MMU */
+#include <asm/book3s/32/mmu-hash.h>
+#elif defined(CONFIG_PPC_MMU_NOHASH)
+#include <asm/nohash/mmu.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
new file mode 100644
index 000000000..c1ea270bb
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm/cputhreads.h>
+
+/*
+ * Most of the context management is out of line.
+ */
+#define init_new_context init_new_context
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
+extern void destroy_context(struct mm_struct *mm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+struct mm_iommu_table_group_mem_t;
+
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_new(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+ unsigned long entries, unsigned long dev_hpa,
+ struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_put(struct mm_struct *mm,
+ struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+ unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries);
+extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+ unsigned int pageshift, unsigned long *size);
+extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+#else
+static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+ unsigned int pageshift, unsigned long *size)
+{
+ return false;
+}
+static inline void mm_iommu_init(struct mm_struct *mm) { }
+#endif
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+extern void radix__switch_mmu_context(struct mm_struct *prev,
+ struct mm_struct *next);
+static inline void switch_mmu_context(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (radix_enabled())
+ return radix__switch_mmu_context(prev, next);
+ return switch_slb(tsk, next);
+}
+
+extern int hash__alloc_context_id(void);
+void __init hash__reserve_context_id(int id);
+extern void __destroy_context(int context_id);
+static inline void mmu_context_init(void) { }
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+static inline int alloc_extended_context(struct mm_struct *mm,
+ unsigned long ea)
+{
+ int context_id;
+
+ int index = ea >> MAX_EA_BITS_PER_CONTEXT;
+
+ context_id = hash__alloc_context_id();
+ if (context_id < 0)
+ return context_id;
+
+ VM_WARN_ON(mm->context.extended_id[index]);
+ mm->context.extended_id[index] = context_id;
+ return context_id;
+}
+
+static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
+{
+ int context_id;
+
+ context_id = get_user_context(&mm->context, ea);
+ if (!context_id)
+ return true;
+ return false;
+}
+#endif
+
+#else
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+extern unsigned long __init_new_context(void);
+extern void __destroy_context(unsigned long context_id);
+extern void mmu_context_init(void);
+static inline int alloc_extended_context(struct mm_struct *mm,
+ unsigned long ea)
+{
+ /* non-book3s_64 should never see this called */
+ WARN_ON(1);
+ return -ENOMEM;
+}
+
+static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
+{
+ return false;
+}
+#endif
+
+extern int use_cop(unsigned long acop, struct mm_struct *mm);
+extern void drop_cop(unsigned long acop, struct mm_struct *mm);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void inc_mm_active_cpus(struct mm_struct *mm)
+{
+ atomic_inc(&mm->context.active_cpus);
+}
+
+static inline void dec_mm_active_cpus(struct mm_struct *mm)
+{
+ atomic_dec(&mm->context.active_cpus);
+}
+
+static inline void mm_context_add_copro(struct mm_struct *mm)
+{
+ /*
+ * If any copro is in use, increment the active CPU count
+ * in order to force TLB invalidations to be global so that they
+ * propagate to the Nest MMU.
+ */
+ if (atomic_inc_return(&mm->context.copros) == 1)
+ inc_mm_active_cpus(mm);
+}
+
+static inline void mm_context_remove_copro(struct mm_struct *mm)
+{
+ int c;
+
+ /*
+ * When removing the last copro, we need to broadcast a global
+ * flush of the full mm, as the next TLBI may be local and the
+ * nMMU and/or PSL need to be cleaned up.
+ *
+ * Both the 'copros' and 'active_cpus' counts are looked at in
+ * flush_all_mm() to determine the scope (local/global) of the
+ * TLBIs, so we need to flush first before decrementing
+ * 'copros'. If this API is used by several callers for the
+ * same context, it can lead to over-flushing. It's hopefully
+ * not common enough to be a problem.
+ *
+ * Skip on hash, as we don't know how to do the proper flush
+ * for the time being. Invalidations will remain global if
+ * used on hash. Note that we can't drop 'copros' either, as
+ * it could make some invalidations local with no flush
+ * in-between.
+ */
+ if (radix_enabled()) {
+ flush_all_mm(mm);
+
+ c = atomic_dec_if_positive(&mm->context.copros);
+ /* Detect imbalance between add and remove */
+ WARN_ON(c < 0);
+
+ if (c == 0)
+ dec_mm_active_cpus(mm);
+ }
+}
+
+/*
+ * The vas_windows counter shows the number of open windows in the mm
+ * context. During context switch, use this counter to clear the
+ * foreign real address mapping (CP_ABORT) for the thread / process
+ * that intends to use COPY/PASTE. When a process closes all windows,
+ * disable CP_ABORT, which is expensive to run.
+ *
+ * For user context, register a copro so that TLBIs are seen by the
+ * nest MMU. mm_context_add/remove_vas_window() are used only for user
+ * space windows.
+ */
+static inline void mm_context_add_vas_window(struct mm_struct *mm)
+{
+ atomic_inc(&mm->context.vas_windows);
+ mm_context_add_copro(mm);
+}
+
+static inline void mm_context_remove_vas_window(struct mm_struct *mm)
+{
+ int v;
+
+ mm_context_remove_copro(mm);
+ v = atomic_dec_if_positive(&mm->context.vas_windows);
+
+ /* Detect imbalance between add and remove */
+ WARN_ON(v < 0);
+}
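+
+/*
+ * Sketch of the expected pairing (illustrative; the real callers live
+ * in the VAS window open/close paths):
+ *
+ *	mm_context_add_vas_window(mm);		// window open: also adds a copro
+ *	...
+ *	mm_context_remove_vas_window(mm);	// window close: drops the copro
+ */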
+#else
+static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
+static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
+static inline void mm_context_add_copro(struct mm_struct *mm) { }
+static inline void mm_context_remove_copro(struct mm_struct *mm) { }
+#endif
+
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+ unsigned long lpid,
+ unsigned long type,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end) { }
+#endif
+
+extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ switch_mm_irqs_off(prev, next, current);
+}
+
+/* We don't currently use enter_lazy_tlb() for anything */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define enter_lazy_tlb enter_lazy_tlb
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+ /* 64-bit Book3E keeps track of current PGD in the PACA */
+ get_paca()->pgd = NULL;
+}
+#endif
+
+extern void arch_exit_mmap(struct mm_struct *mm);
+
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ unsigned long vdso_base = (unsigned long)mm->context.vdso;
+
+ if (start <= vdso_base && vdso_base < end)
+ mm->context.vdso = NULL;
+}
+
+#ifdef CONFIG_PPC_MEM_KEYS
+bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
+ bool execute, bool foreign);
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
+#else /* CONFIG_PPC_MEM_KEYS */
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+
+#define pkey_mm_init(mm)
+#define arch_dup_pkeys(oldmm, mm)
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
+{
+ return 0x0UL;
+}
+
+#endif /* CONFIG_PPC_MEM_KEYS */
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ arch_dup_pkeys(oldmm, mm);
+ return 0;
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
new file mode 100644
index 000000000..4c6c6dbd1
--- /dev/null
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
+ *
+ * PowerPC64 port:
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+#ifdef __KERNEL__
+
+#include <linux/cpumask.h>
+
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the
+ * flags field of the struct page
+ */
+
+#ifdef CONFIG_NUMA
+
+extern struct pglist_data *node_data[];
+/*
+ * Return a pointer to the node data for node n.
+ */
+#define NODE_DATA(nid) (node_data[nid])
+
+/*
+ * The following are specific to this NUMA platform.
+ */
+
+extern int numa_cpu_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
+#endif
+
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
+#endif /* CONFIG_NUMA */
+#ifdef CONFIG_FA_DUMP
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
new file mode 100644
index 000000000..09e2ffd36
--- /dev/null
+++ b/arch/powerpc/include/asm/module.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_MODULE_H
+#define _ASM_POWERPC_MODULE_H
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <asm/bug.h>
+#include <asm-generic/module.h>
+
+#ifndef __powerpc64__
+/*
+ * Thanks to Paul M for explaining this.
+ *
+ * PPC can only do relative jumps within +/- 32MB, and often the kernel
+ * and other modules are further away than this. So, we jump to a table of
+ * trampolines attached to the module (the Procedure Linkage Table)
+ * whenever that happens.
+ */
+
+struct ppc_plt_entry {
+ /* 16 byte jump instruction sequence (4 instructions) */
+ unsigned int jump[4];
+};
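+
+/*
+ * A minimal sketch of what one PLT entry ends up containing, assuming
+ * the usual lis/addi/mtctr/bctr pattern for reaching a target outside
+ * the +/- 32MB relative branch range:
+ *
+ *	lis   r12, target@ha
+ *	addi  r12, r12, target@l
+ *	mtctr r12
+ *	bctr
+ */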
+#endif /* __powerpc64__ */
+
+
+struct mod_arch_specific {
+#ifdef __powerpc64__
+ unsigned int stubs_section; /* Index of stubs section in module */
+ unsigned int toc_section; /* What section is the TOC? */
+ bool toc_fixed; /* Have we fixed up .TOC.? */
+
+ /* For module function descriptor dereference */
+ unsigned long start_opd;
+ unsigned long end_opd;
+#else /* powerpc64 */
+ /* Indices of PLT sections within module. */
+ unsigned int core_plt_section;
+ unsigned int init_plt_section;
+#endif /* powerpc64 */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ unsigned long tramp;
+ unsigned long tramp_regs;
+#endif
+
+ /* List of BUG addresses, source line numbers and filenames */
+ struct list_head bug_list;
+ struct bug_entry *bug_table;
+ unsigned int num_bugs;
+};
+
+/*
+ * Select ELF headers.
+ * Make empty section for module_frob_arch_sections to expand.
+ */
+
+#ifdef __powerpc64__
+# ifdef MODULE
+ asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
+# endif
+#else
+# ifdef MODULE
+ asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
+ asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+# ifdef MODULE
+ asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+
+int module_trampoline_target(struct module *mod, unsigned long trampoline,
+ unsigned long *target);
+int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
+#else
+static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
+{
+ return 0;
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/module.lds.h b/arch/powerpc/include/asm/module.lds.h
new file mode 100644
index 000000000..cea5dc124
--- /dev/null
+++ b/arch/powerpc/include/asm/module.lds.h
@@ -0,0 +1,8 @@
+/* Force alignment of .toc section. */
+SECTIONS
+{
+ .toc 0 : ALIGN(256)
+ {
+ *(.got .toc)
+ }
+}
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
new file mode 100644
index 000000000..9ae49e743
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MPC5121 Prototypes and definitions
+ */
+
+#ifndef __ASM_POWERPC_MPC5121_H__
+#define __ASM_POWERPC_MPC5121_H__
+
+/* MPC512x Reset module registers */
+struct mpc512x_reset_module {
+ u32 rcwlr; /* Reset Configuration Word Low Register */
+ u32 rcwhr; /* Reset Configuration Word High Register */
+ u32 reserved1;
+ u32 reserved2;
+ u32 rsr; /* Reset Status Register */
+ u32 rmr; /* Reset Mode Register */
+ u32 rpr; /* Reset Protection Register */
+ u32 rcr; /* Reset Control Register */
+ u32 rcer; /* Reset Control Enable Register */
+};
+
+/*
+ * Clock Control Module
+ */
+struct mpc512x_ccm {
+ u32 spmr; /* System PLL Mode Register */
+ u32 sccr1; /* System Clock Control Register 1 */
+ u32 sccr2; /* System Clock Control Register 2 */
+ u32 scfr1; /* System Clock Frequency Register 1 */
+ u32 scfr2; /* System Clock Frequency Register 2 */
+ u32 scfr2s; /* System Clock Frequency Shadow Register 2 */
+ u32 bcr; /* Bread Crumb Register */
+ u32 psc_ccr[12]; /* PSC Clock Control Registers */
+ u32 spccr; /* SPDIF Clock Control Register */
+ u32 cccr; /* CFM Clock Control Register */
+ u32 dccr; /* DIU Clock Control Register */
+ u32 mscan_ccr[4]; /* MSCAN Clock Control Registers */
+ u32 out_ccr[4]; /* OUT CLK Configure Registers */
+ u32 rsv0[2]; /* Reserved */
+ u32 scfr3; /* System Clock Frequency Register 3 */
+ u32 rsv1[3]; /* Reserved */
+ u32 spll_lock_cnt; /* System PLL Lock Counter */
+ u8 res[0x6c]; /* Reserved */
+};
+
+/*
+ * LPC Module
+ */
+struct mpc512x_lpc {
+ u32 cs_cfg[8]; /* CS config */
+ u32 cs_ctrl; /* CS Control Register */
+ u32 cs_status; /* CS Status Register */
+ u32 burst_ctrl; /* CS Burst Control Register */
+ u32 deadcycle_ctrl; /* CS Deadcycle Control Register */
+ u32 holdcycle_ctrl; /* CS Holdcycle Control Register */
+ u32 alt; /* Address Latch Timing Register */
+};
+
+int mpc512x_cs_config(unsigned int cs, u32 val);
+
+/*
+ * SCLPC Module (LPB FIFO)
+ */
+struct mpc512x_lpbfifo {
+ u32 pkt_size; /* SCLPC Packet Size Register */
+ u32 start_addr; /* SCLPC Start Address Register */
+ u32 ctrl; /* SCLPC Control Register */
+ u32 enable; /* SCLPC Enable Register */
+ u32 reserved1;
+ u32 status; /* SCLPC Status Register */
+ u32 bytes_done; /* SCLPC Bytes Done Register */
+ u32 emb_sc; /* EMB Share Counter Register */
+ u32 emb_pc; /* EMB Pause Control Register */
+ u32 reserved2[7];
+ u32 data_word; /* LPC RX/TX FIFO Data Word Register */
+ u32 fifo_status; /* LPC RX/TX FIFO Status Register */
+ u32 fifo_ctrl; /* LPC RX/TX FIFO Control Register */
+ u32 fifo_alarm; /* LPC RX/TX FIFO Alarm Register */
+};
+
+#define MPC512X_SCLPC_START (1 << 31)
+#define MPC512X_SCLPC_CS(x) (((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FLUSH (1 << 17)
+#define MPC512X_SCLPC_READ (1 << 16)
+#define MPC512X_SCLPC_DAI (1 << 8)
+#define MPC512X_SCLPC_BPT(x) ((x) & 0x3f)
+#define MPC512X_SCLPC_RESET (1 << 24)
+#define MPC512X_SCLPC_FIFO_RESET (1 << 16)
+#define MPC512X_SCLPC_ABORT_INT_ENABLE (1 << 9)
+#define MPC512X_SCLPC_NORM_INT_ENABLE (1 << 8)
+#define MPC512X_SCLPC_ENABLE (1 << 0)
+#define MPC512X_SCLPC_SUCCESS (1 << 24)
+#define MPC512X_SCLPC_FIFO_CTRL(x) (((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FIFO_ALARM(x) ((x) & 0x3ff)
+
+enum lpb_dev_portsize {
+ LPB_DEV_PORTSIZE_UNDEFINED = 0,
+ LPB_DEV_PORTSIZE_1_BYTE = 1,
+ LPB_DEV_PORTSIZE_2_BYTES = 2,
+ LPB_DEV_PORTSIZE_4_BYTES = 4,
+ LPB_DEV_PORTSIZE_8_BYTES = 8
+};
+
+enum mpc512x_lpbfifo_req_dir {
+ MPC512X_LPBFIFO_REQ_DIR_READ,
+ MPC512X_LPBFIFO_REQ_DIR_WRITE
+};
+
+struct mpc512x_lpbfifo_request {
+ phys_addr_t dev_phys_addr; /* physical address of some device on LPB */
+ void *ram_virt_addr; /* virtual address of some region in RAM */
+ u32 size;
+ enum lpb_dev_portsize portsize;
+ enum mpc512x_lpbfifo_req_dir dir;
+ void (*callback)(struct mpc512x_lpbfifo_request *);
+};
+
+int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req);
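+
+/*
+ * Minimal sketch of a read request (field values and the callback are
+ * illustrative, not taken from an in-tree user):
+ *
+ *	struct mpc512x_lpbfifo_request req = {
+ *		.dev_phys_addr	= dev_paddr,
+ *		.ram_virt_addr	= buf,
+ *		.size		= len,
+ *		.portsize	= LPB_DEV_PORTSIZE_4_BYTES,
+ *		.dir		= MPC512X_LPBFIFO_REQ_DIR_READ,
+ *		.callback	= my_done_cb,
+ *	};
+ *
+ *	err = mpc512x_lpbfifo_submit(&req);
+ */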
+
+#endif /* __ASM_POWERPC_MPC5121_H__ */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
new file mode 100644
index 000000000..5ea16a71c
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -0,0 +1,365 @@
+/*
+ * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
+ * May need to be cleaned as the port goes on ...
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_POWERPC_MPC52xx_H__
+#define __ASM_POWERPC_MPC52xx_H__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/mpc5xxx.h>
+#endif /* __ASSEMBLY__ */
+
+#include <linux/suspend.h>
+
+/* Variants of the 5200(B) */
+#define MPC5200_SVR 0x80110010
+#define MPC5200_SVR_MASK 0xfffffff0
+#define MPC5200B_SVR 0x80110020
+#define MPC5200B_SVR_MASK 0xfffffff0
+
+/* ======================================================================== */
+/* Structures mapping of some unit register set */
+/* ======================================================================== */
+
+#ifndef __ASSEMBLY__
+
+/* Memory Mapping Control */
+struct mpc52xx_mmap_ctl {
+ u32 mbar; /* MMAP_CTRL + 0x00 */
+
+ u32 cs0_start; /* MMAP_CTRL + 0x04 */
+ u32 cs0_stop; /* MMAP_CTRL + 0x08 */
+ u32 cs1_start; /* MMAP_CTRL + 0x0c */
+ u32 cs1_stop; /* MMAP_CTRL + 0x10 */
+ u32 cs2_start; /* MMAP_CTRL + 0x14 */
+ u32 cs2_stop; /* MMAP_CTRL + 0x18 */
+ u32 cs3_start; /* MMAP_CTRL + 0x1c */
+ u32 cs3_stop; /* MMAP_CTRL + 0x20 */
+ u32 cs4_start; /* MMAP_CTRL + 0x24 */
+ u32 cs4_stop; /* MMAP_CTRL + 0x28 */
+ u32 cs5_start; /* MMAP_CTRL + 0x2c */
+ u32 cs5_stop; /* MMAP_CTRL + 0x30 */
+
+ u32 sdram0; /* MMAP_CTRL + 0x34 */
+	u32 sdram1;		/* MMAP_CTRL + 0x38 */
+
+ u32 reserved[4]; /* MMAP_CTRL + 0x3c .. 0x48 */
+
+ u32 boot_start; /* MMAP_CTRL + 0x4c */
+ u32 boot_stop; /* MMAP_CTRL + 0x50 */
+
+ u32 ipbi_ws_ctrl; /* MMAP_CTRL + 0x54 */
+
+ u32 cs6_start; /* MMAP_CTRL + 0x58 */
+ u32 cs6_stop; /* MMAP_CTRL + 0x5c */
+ u32 cs7_start; /* MMAP_CTRL + 0x60 */
+ u32 cs7_stop; /* MMAP_CTRL + 0x64 */
+};
+
+/* SDRAM control */
+struct mpc52xx_sdram {
+ u32 mode; /* SDRAM + 0x00 */
+ u32 ctrl; /* SDRAM + 0x04 */
+ u32 config1; /* SDRAM + 0x08 */
+ u32 config2; /* SDRAM + 0x0c */
+};
+
+/* SDMA */
+struct mpc52xx_sdma {
+ u32 taskBar; /* SDMA + 0x00 */
+ u32 currentPointer; /* SDMA + 0x04 */
+ u32 endPointer; /* SDMA + 0x08 */
+ u32 variablePointer; /* SDMA + 0x0c */
+
+ u8 IntVect1; /* SDMA + 0x10 */
+ u8 IntVect2; /* SDMA + 0x11 */
+ u16 PtdCntrl; /* SDMA + 0x12 */
+
+ u32 IntPend; /* SDMA + 0x14 */
+ u32 IntMask; /* SDMA + 0x18 */
+
+ u16 tcr[16]; /* SDMA + 0x1c .. 0x3a */
+
+ u8 ipr[32]; /* SDMA + 0x3c .. 0x5b */
+
+ u32 cReqSelect; /* SDMA + 0x5c */
+ u32 task_size0; /* SDMA + 0x60 */
+ u32 task_size1; /* SDMA + 0x64 */
+ u32 MDEDebug; /* SDMA + 0x68 */
+ u32 ADSDebug; /* SDMA + 0x6c */
+ u32 Value1; /* SDMA + 0x70 */
+ u32 Value2; /* SDMA + 0x74 */
+ u32 Control; /* SDMA + 0x78 */
+ u32 Status; /* SDMA + 0x7c */
+ u32 PTDDebug; /* SDMA + 0x80 */
+};
+
+/* GPT */
+struct mpc52xx_gpt {
+ u32 mode; /* GPTx + 0x00 */
+ u32 count; /* GPTx + 0x04 */
+ u32 pwm; /* GPTx + 0x08 */
+	u32 status;	/* GPTx + 0x0c */
+};
+
+/* GPIO */
+struct mpc52xx_gpio {
+ u32 port_config; /* GPIO + 0x00 */
+ u32 simple_gpioe; /* GPIO + 0x04 */
+ u32 simple_ode; /* GPIO + 0x08 */
+ u32 simple_ddr; /* GPIO + 0x0c */
+ u32 simple_dvo; /* GPIO + 0x10 */
+ u32 simple_ival; /* GPIO + 0x14 */
+ u8 outo_gpioe; /* GPIO + 0x18 */
+ u8 reserved1[3]; /* GPIO + 0x19 */
+ u8 outo_dvo; /* GPIO + 0x1c */
+ u8 reserved2[3]; /* GPIO + 0x1d */
+ u8 sint_gpioe; /* GPIO + 0x20 */
+ u8 reserved3[3]; /* GPIO + 0x21 */
+ u8 sint_ode; /* GPIO + 0x24 */
+ u8 reserved4[3]; /* GPIO + 0x25 */
+ u8 sint_ddr; /* GPIO + 0x28 */
+ u8 reserved5[3]; /* GPIO + 0x29 */
+ u8 sint_dvo; /* GPIO + 0x2c */
+ u8 reserved6[3]; /* GPIO + 0x2d */
+ u8 sint_inten; /* GPIO + 0x30 */
+ u8 reserved7[3]; /* GPIO + 0x31 */
+ u16 sint_itype; /* GPIO + 0x34 */
+ u16 reserved8; /* GPIO + 0x36 */
+ u8 gpio_control; /* GPIO + 0x38 */
+ u8 reserved9[3]; /* GPIO + 0x39 */
+ u8 sint_istat; /* GPIO + 0x3c */
+ u8 sint_ival; /* GPIO + 0x3d */
+ u8 bus_errs; /* GPIO + 0x3e */
+ u8 reserved10; /* GPIO + 0x3f */
+};
+
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD 4
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD 5
+#define MPC52xx_GPIO_PCI_DIS (1<<15)
+
+/* GPIO with WakeUp*/
+struct mpc52xx_gpio_wkup {
+ u8 wkup_gpioe; /* GPIO_WKUP + 0x00 */
+ u8 reserved1[3]; /* GPIO_WKUP + 0x03 */
+ u8 wkup_ode; /* GPIO_WKUP + 0x04 */
+ u8 reserved2[3]; /* GPIO_WKUP + 0x05 */
+ u8 wkup_ddr; /* GPIO_WKUP + 0x08 */
+ u8 reserved3[3]; /* GPIO_WKUP + 0x09 */
+ u8 wkup_dvo; /* GPIO_WKUP + 0x0C */
+ u8 reserved4[3]; /* GPIO_WKUP + 0x0D */
+ u8 wkup_inten; /* GPIO_WKUP + 0x10 */
+ u8 reserved5[3]; /* GPIO_WKUP + 0x11 */
+ u8 wkup_iinten; /* GPIO_WKUP + 0x14 */
+ u8 reserved6[3]; /* GPIO_WKUP + 0x15 */
+ u16 wkup_itype; /* GPIO_WKUP + 0x18 */
+ u8 reserved7[2]; /* GPIO_WKUP + 0x1A */
+ u8 wkup_maste; /* GPIO_WKUP + 0x1C */
+ u8 reserved8[3]; /* GPIO_WKUP + 0x1D */
+ u8 wkup_ival; /* GPIO_WKUP + 0x20 */
+ u8 reserved9[3]; /* GPIO_WKUP + 0x21 */
+ u8 wkup_istat; /* GPIO_WKUP + 0x24 */
+ u8 reserved10[3]; /* GPIO_WKUP + 0x25 */
+};
+
+/* XLB Bus control */
+struct mpc52xx_xlb {
+ u8 reserved[0x40];
+ u32 config; /* XLB + 0x40 */
+ u32 version; /* XLB + 0x44 */
+ u32 status; /* XLB + 0x48 */
+ u32 int_enable; /* XLB + 0x4c */
+ u32 addr_capture; /* XLB + 0x50 */
+ u32 bus_sig_capture; /* XLB + 0x54 */
+ u32 addr_timeout; /* XLB + 0x58 */
+ u32 data_timeout; /* XLB + 0x5c */
+ u32 bus_act_timeout; /* XLB + 0x60 */
+ u32 master_pri_enable; /* XLB + 0x64 */
+ u32 master_priority; /* XLB + 0x68 */
+ u32 base_address; /* XLB + 0x6c */
+ u32 snoop_window; /* XLB + 0x70 */
+};
+
+#define MPC52xx_XLB_CFG_PLDIS (1 << 31)
+#define MPC52xx_XLB_CFG_SNOOP (1 << 15)
+
+/* Clock Distribution control */
+struct mpc52xx_cdm {
+ u32 jtag_id; /* CDM + 0x00 reg0 read only */
+ u32 rstcfg; /* CDM + 0x04 reg1 read only */
+ u32 breadcrumb; /* CDM + 0x08 reg2 */
+
+ u8 mem_clk_sel; /* CDM + 0x0c reg3 byte0 */
+ u8 xlb_clk_sel; /* CDM + 0x0d reg3 byte1 read only */
+ u8 ipb_clk_sel; /* CDM + 0x0e reg3 byte2 */
+ u8 pci_clk_sel; /* CDM + 0x0f reg3 byte3 */
+
+ u8 ext_48mhz_en; /* CDM + 0x10 reg4 byte0 */
+ u8 fd_enable; /* CDM + 0x11 reg4 byte1 */
+ u16 fd_counters; /* CDM + 0x12 reg4 byte2,3 */
+
+ u32 clk_enables; /* CDM + 0x14 reg5 */
+
+ u8 osc_disable; /* CDM + 0x18 reg6 byte0 */
+ u8 reserved0[3]; /* CDM + 0x19 reg6 byte1,2,3 */
+
+ u8 ccs_sleep_enable; /* CDM + 0x1c reg7 byte0 */
+ u8 osc_sleep_enable; /* CDM + 0x1d reg7 byte1 */
+ u8 reserved1; /* CDM + 0x1e reg7 byte2 */
+ u8 ccs_qreq_test; /* CDM + 0x1f reg7 byte3 */
+
+ u8 soft_reset; /* CDM + 0x20 u8 byte0 */
+ u8 no_ckstp; /* CDM + 0x21 u8 byte0 */
+ u8 reserved2[2]; /* CDM + 0x22 u8 byte1,2,3 */
+
+ u8 pll_lock; /* CDM + 0x24 reg9 byte0 */
+ u8 pll_looselock; /* CDM + 0x25 reg9 byte1 */
+ u8 pll_sm_lockwin; /* CDM + 0x26 reg9 byte2 */
+ u8 reserved3; /* CDM + 0x27 reg9 byte3 */
+
+ u16 reserved4; /* CDM + 0x28 reg10 byte0,1 */
+ u16 mclken_div_psc1; /* CDM + 0x2a reg10 byte2,3 */
+
+ u16 reserved5; /* CDM + 0x2c reg11 byte0,1 */
+ u16 mclken_div_psc2; /* CDM + 0x2e reg11 byte2,3 */
+
+ u16 reserved6; /* CDM + 0x30 reg12 byte0,1 */
+ u16 mclken_div_psc3; /* CDM + 0x32 reg12 byte2,3 */
+
+ u16 reserved7; /* CDM + 0x34 reg13 byte0,1 */
+ u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
+};
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+ u32 per_mask; /* INTR + 0x00 */
+ u32 per_pri1; /* INTR + 0x04 */
+ u32 per_pri2; /* INTR + 0x08 */
+ u32 per_pri3; /* INTR + 0x0c */
+ u32 ctrl; /* INTR + 0x10 */
+ u32 main_mask; /* INTR + 0x14 */
+ u32 main_pri1; /* INTR + 0x18 */
+ u32 main_pri2; /* INTR + 0x1c */
+ u32 reserved1; /* INTR + 0x20 */
+ u32 enc_status; /* INTR + 0x24 */
+ u32 crit_status; /* INTR + 0x28 */
+ u32 main_status; /* INTR + 0x2c */
+ u32 per_status; /* INTR + 0x30 */
+ u32 reserved2; /* INTR + 0x34 */
+ u32 per_error; /* INTR + 0x38 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/* ========================================================================= */
+/* Prototypes for MPC52xx sysdev */
+/* ========================================================================= */
+
+#ifndef __ASSEMBLY__
+
+struct device_node;
+
+/* mpc52xx_common.c */
+extern void mpc5200_setup_xlb_arbiter(void);
+extern void mpc52xx_declare_of_platform_devices(void);
+extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
+extern void mpc52xx_map_common_devices(void);
+extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
+extern void __noreturn mpc52xx_restart(char *cmd);
+
+/* mpc52xx_gpt.c */
+struct mpc52xx_gpt_priv;
+extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
+extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous);
+extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
+extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
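+
+/*
+ * Illustrative use, assuming the GPT irq is already known (e.g. mapped
+ * from the device tree) and that the period is given in nanoseconds:
+ *
+ *	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(irq);
+ *
+ *	mpc52xx_gpt_start_timer(gpt, period_ns, 1);	// continuous mode
+ *	...
+ *	mpc52xx_gpt_stop_timer(gpt);
+ */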
+
+/* mpc52xx_lpbfifo.c */
+#define MPC52XX_LPBFIFO_FLAG_READ (0)
+#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
+#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
+#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
+#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
+
+struct mpc52xx_lpbfifo_request {
+ struct list_head list;
+
+ /* localplus bus address */
+ unsigned int cs;
+ size_t offset;
+
+ /* Memory address */
+ void *data;
+ phys_addr_t data_phys;
+
+ /* Details of transfer */
+ size_t size;
+ size_t pos; /* current position of transfer */
+ int flags;
+ int defer_xfer_start;
+
+ /* What to do when finished */
+ void (*callback)(struct mpc52xx_lpbfifo_request *);
+
+ void *priv; /* Driver private data */
+
+ /* statistics */
+ int irq_count;
+ int irq_ticks;
+ u8 last_byte;
+ int buffer_not_done_cnt;
+};
+
+extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_poll(void);
+extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
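+
+/*
+ * Minimal sketch of a polled, non-DMA write (values and the callback
+ * are illustrative):
+ *
+ *	struct mpc52xx_lpbfifo_request req = {
+ *		.cs		= 0,
+ *		.offset		= 0,
+ *		.data		= buf,
+ *		.size		= len,
+ *		.flags		= MPC52XX_LPBFIFO_FLAG_WRITE |
+ *				  MPC52XX_LPBFIFO_FLAG_NO_DMA,
+ *		.callback	= my_done_cb,
+ *	};
+ *
+ *	err = mpc52xx_lpbfifo_submit(&req);
+ */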
+
+/* mpc52xx_pic.c */
+extern void mpc52xx_init_irq(void);
+extern unsigned int mpc52xx_get_irq(void);
+
+/* mpc52xx_pci.c */
+#ifdef CONFIG_PCI
+extern int __init mpc52xx_add_bridge(struct device_node *node);
+extern void __init mpc52xx_setup_pci(void);
+#else
+static inline void mpc52xx_setup_pci(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PM
+struct mpc52xx_suspend {
+ void (*board_suspend_prepare)(void __iomem *mbar);
+ void (*board_resume_finish)(void __iomem *mbar);
+};
+
+extern struct mpc52xx_suspend mpc52xx_suspend;
+extern int __init mpc52xx_pm_init(void);
+extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
+
+/* lite5200 calls mpc5200 suspend functions, so here they are */
+extern int mpc52xx_pm_prepare(void);
+extern int mpc52xx_pm_enter(suspend_state_t);
+extern void mpc52xx_pm_finish(void);
+extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
+
+#ifdef CONFIG_PPC_LITE5200
+int __init lite5200_pm_init(void);
+#endif
+#endif /* CONFIG_PM */
+
+#endif /* __ASM_POWERPC_MPC52xx_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
new file mode 100644
index 000000000..ec995b289
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -0,0 +1,352 @@
+/*
+ * arch/powerpc/include/asm/mpc52xx_psc.h
+ *
+ * Definitions of consts/structs to drive the Freescale MPC52xx OnChip
+ * PSCs. These are shared between multiple drivers since a PSC can be a
+ * UART, AC97, IR, I2S, ... so this header lives in the common asm
+ * includes.
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Based on / extracted from a 2.4-era header originally written by
+ * Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_MPC52xx_PSC_H__
+#define __ASM_MPC52xx_PSC_H__
+
+#include <asm/types.h>
+
+/* Max number of PSCs */
+#ifdef CONFIG_PPC_MPC512x
+#define MPC52xx_PSC_MAXNUM 12
+#else
+#define MPC52xx_PSC_MAXNUM 6
+#endif
+
+/* Programmable Serial Controller (PSC) status register bits */
+#define MPC52xx_PSC_SR_UNEX_RX 0x0001
+#define MPC52xx_PSC_SR_DATA_VAL 0x0002
+#define MPC52xx_PSC_SR_DATA_OVR 0x0004
+#define MPC52xx_PSC_SR_CMDSEND 0x0008
+#define MPC52xx_PSC_SR_CDE 0x0080
+#define MPC52xx_PSC_SR_RXRDY 0x0100
+#define MPC52xx_PSC_SR_RXFULL 0x0200
+#define MPC52xx_PSC_SR_TXRDY 0x0400
+#define MPC52xx_PSC_SR_TXEMP 0x0800
+#define MPC52xx_PSC_SR_OE 0x1000
+#define MPC52xx_PSC_SR_PE 0x2000
+#define MPC52xx_PSC_SR_FE 0x4000
+#define MPC52xx_PSC_SR_RB 0x8000
+
+/* PSC Command values */
+#define MPC52xx_PSC_RX_ENABLE 0x0001
+#define MPC52xx_PSC_RX_DISABLE 0x0002
+#define MPC52xx_PSC_TX_ENABLE 0x0004
+#define MPC52xx_PSC_TX_DISABLE 0x0008
+#define MPC52xx_PSC_SEL_MODE_REG_1 0x0010
+#define MPC52xx_PSC_RST_RX 0x0020
+#define MPC52xx_PSC_RST_TX 0x0030
+#define MPC52xx_PSC_RST_ERR_STAT 0x0040
+#define MPC52xx_PSC_RST_BRK_CHG_INT 0x0050
+#define MPC52xx_PSC_START_BRK 0x0060
+#define MPC52xx_PSC_STOP_BRK 0x0070
+
+/* PSC TxRx FIFO status bits */
+#define MPC52xx_PSC_RXTX_FIFO_ERR 0x0040
+#define MPC52xx_PSC_RXTX_FIFO_UF 0x0020
+#define MPC52xx_PSC_RXTX_FIFO_OF 0x0010
+#define MPC52xx_PSC_RXTX_FIFO_FR 0x0008
+#define MPC52xx_PSC_RXTX_FIFO_FULL 0x0004
+#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002
+#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
+
+/* PSC interrupt status/mask bits */
+#define MPC52xx_PSC_IMR_UNEX_RX_SLOT 0x0001
+#define MPC52xx_PSC_IMR_DATA_VALID 0x0002
+#define MPC52xx_PSC_IMR_DATA_OVR 0x0004
+#define MPC52xx_PSC_IMR_CMD_SEND 0x0008
+#define MPC52xx_PSC_IMR_ERROR 0x0040
+#define MPC52xx_PSC_IMR_DEOF 0x0080
+#define MPC52xx_PSC_IMR_TXRDY 0x0100
+#define MPC52xx_PSC_IMR_RXRDY 0x0200
+#define MPC52xx_PSC_IMR_DB 0x0400
+#define MPC52xx_PSC_IMR_TXEMP 0x0800
+#define MPC52xx_PSC_IMR_ORERR 0x1000
+#define MPC52xx_PSC_IMR_IPC 0x8000
+
+/* PSC input port change bits */
+#define MPC52xx_PSC_CTS 0x01
+#define MPC52xx_PSC_DCD 0x02
+#define MPC52xx_PSC_D_CTS 0x10
+#define MPC52xx_PSC_D_DCD 0x20
+
+/* PSC acr bits */
+#define MPC52xx_PSC_IEC_CTS 0x01
+#define MPC52xx_PSC_IEC_DCD 0x02
+
+/* PSC output port bits */
+#define MPC52xx_PSC_OP_RTS 0x01
+#define MPC52xx_PSC_OP_RES 0x02
+
+/* PSC mode fields */
+#define MPC52xx_PSC_MODE_5_BITS 0x00
+#define MPC52xx_PSC_MODE_6_BITS 0x01
+#define MPC52xx_PSC_MODE_7_BITS 0x02
+#define MPC52xx_PSC_MODE_8_BITS 0x03
+#define MPC52xx_PSC_MODE_BITS_MASK 0x03
+#define MPC52xx_PSC_MODE_PAREVEN 0x00
+#define MPC52xx_PSC_MODE_PARODD 0x04
+#define MPC52xx_PSC_MODE_PARFORCE 0x08
+#define MPC52xx_PSC_MODE_PARNONE 0x10
+#define MPC52xx_PSC_MODE_ERR 0x20
+#define MPC52xx_PSC_MODE_FFULL 0x40
+#define MPC52xx_PSC_MODE_RXRTS 0x80
+
+#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
+#define MPC52xx_PSC_MODE_ONE_STOP 0x07
+#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
+#define MPC52xx_PSC_MODE_TXCTS 0x10
+
+#define MPC52xx_PSC_RFNUM_MASK 0x01ff
+
+#define MPC52xx_PSC_SICR_DTS1 (1 << 29)
+#define MPC52xx_PSC_SICR_SHDR (1 << 28)
+#define MPC52xx_PSC_SICR_SIM_MASK (0xf << 24)
+#define MPC52xx_PSC_SICR_SIM_UART (0x0 << 24)
+#define MPC52xx_PSC_SICR_SIM_UART_DCD (0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_8 (0x1 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_16 (0x2 << 24)
+#define MPC52xx_PSC_SICR_SIM_AC97 (0x3 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR (0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR_DCD (0xc << 24)
+#define MPC52xx_PSC_SICR_SIM_MIR (0x5 << 24)
+#define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24)
+#define MPC52xx_PSC_SICR_ACRB (0x8 << 24)
+#define MPC52xx_PSC_SICR_AWR (1 << 30)
+#define MPC52xx_PSC_SICR_GENCLK (1 << 23)
+#define MPC52xx_PSC_SICR_I2S (1 << 22)
+#define MPC52xx_PSC_SICR_CLKPOL (1 << 21)
+#define MPC52xx_PSC_SICR_SYNCPOL (1 << 20)
+#define MPC52xx_PSC_SICR_CELLSLAVE (1 << 19)
+#define MPC52xx_PSC_SICR_CELL2XCLK (1 << 18)
+#define MPC52xx_PSC_SICR_ESAI (1 << 17)
+#define MPC52xx_PSC_SICR_ENAC97 (1 << 16)
+#define MPC52xx_PSC_SICR_SPI (1 << 15)
+#define MPC52xx_PSC_SICR_MSTR (1 << 14)
+#define MPC52xx_PSC_SICR_CPOL (1 << 13)
+#define MPC52xx_PSC_SICR_CPHA (1 << 12)
+#define MPC52xx_PSC_SICR_USEEOF (1 << 11)
+#define MPC52xx_PSC_SICR_DISABLEEOF (1 << 10)
+
+/* Structure of the hardware registers */
+struct mpc52xx_psc {
+ union {
+ u8 mode; /* PSC + 0x00 */
+ u8 mr2;
+ };
+ u8 reserved0[3];
+ union { /* PSC + 0x04 */
+ u16 status;
+ u16 clock_select;
+ } sr_csr;
+#define mpc52xx_psc_status sr_csr.status
+#define mpc52xx_psc_clock_select sr_csr.clock_select
+ u16 reserved1;
+ u8 command; /* PSC + 0x08 */
+ u8 reserved2[3];
+ union { /* PSC + 0x0c */
+ u8 buffer_8;
+ u16 buffer_16;
+ u32 buffer_32;
+ } buffer;
+#define mpc52xx_psc_buffer_8 buffer.buffer_8
+#define mpc52xx_psc_buffer_16 buffer.buffer_16
+#define mpc52xx_psc_buffer_32 buffer.buffer_32
+ union { /* PSC + 0x10 */
+ u8 ipcr;
+ u8 acr;
+ } ipcr_acr;
+#define mpc52xx_psc_ipcr ipcr_acr.ipcr
+#define mpc52xx_psc_acr ipcr_acr.acr
+ u8 reserved3[3];
+ union { /* PSC + 0x14 */
+ u16 isr;
+ u16 imr;
+ } isr_imr;
+#define mpc52xx_psc_isr isr_imr.isr
+#define mpc52xx_psc_imr isr_imr.imr
+ u16 reserved4;
+ u8 ctur; /* PSC + 0x18 */
+ u8 reserved5[3];
+ u8 ctlr; /* PSC + 0x1c */
+ u8 reserved6[3];
+ /* BitClkDiv field of CCR is byte swapped in
+ * the hardware for mpc5200/b compatibility */
+ u32 ccr; /* PSC + 0x20 */
+ u32 ac97_slots; /* PSC + 0x24 */
+ u32 ac97_cmd; /* PSC + 0x28 */
+ u32 ac97_data; /* PSC + 0x2c */
+ u8 ivr; /* PSC + 0x30 */
+ u8 reserved8[3];
+ u8 ip; /* PSC + 0x34 */
+ u8 reserved9[3];
+ u8 op1; /* PSC + 0x38 */
+ u8 reserved10[3];
+ u8 op0; /* PSC + 0x3c */
+ u8 reserved11[3];
+ u32 sicr; /* PSC + 0x40 */
+ u8 ircr1; /* PSC + 0x44 */
+ u8 reserved13[3];
+	u8		ircr2;		/* PSC + 0x48 */
+ u8 reserved14[3];
+ u8 irsdr; /* PSC + 0x4c */
+ u8 reserved15[3];
+ u8 irmdr; /* PSC + 0x50 */
+ u8 reserved16[3];
+ u8 irfdr; /* PSC + 0x54 */
+ u8 reserved17[3];
+};
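+
+/*
+ * The unions above overlay the read and write views of one register
+ * offset; the accessor #defines keep call sites readable. A minimal
+ * sketch, assuming 'psc' is an ioremap'ed struct mpc52xx_psc __iomem *:
+ *
+ *	u16 status = in_be16(&psc->mpc52xx_psc_status);
+ *
+ *	if (status & MPC52xx_PSC_SR_TXRDY)
+ *		out_8(&psc->mpc52xx_psc_buffer_8, c);
+ */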
+
+struct mpc52xx_psc_fifo {
+ u16 rfnum; /* PSC + 0x58 */
+ u16 reserved18;
+ u16 tfnum; /* PSC + 0x5c */
+ u16 reserved19;
+ u32 rfdata; /* PSC + 0x60 */
+ u16 rfstat; /* PSC + 0x64 */
+ u16 reserved20;
+ u8 rfcntl; /* PSC + 0x68 */
+ u8 reserved21[5];
+ u16 rfalarm; /* PSC + 0x6e */
+ u16 reserved22;
+ u16 rfrptr; /* PSC + 0x72 */
+ u16 reserved23;
+ u16 rfwptr; /* PSC + 0x76 */
+ u16 reserved24;
+ u16 rflrfptr; /* PSC + 0x7a */
+ u16 reserved25;
+ u16 rflwfptr; /* PSC + 0x7e */
+ u32 tfdata; /* PSC + 0x80 */
+ u16 tfstat; /* PSC + 0x84 */
+ u16 reserved26;
+ u8 tfcntl; /* PSC + 0x88 */
+ u8 reserved27[5];
+ u16 tfalarm; /* PSC + 0x8e */
+ u16 reserved28;
+ u16 tfrptr; /* PSC + 0x92 */
+ u16 reserved29;
+ u16 tfwptr; /* PSC + 0x96 */
+ u16 reserved30;
+ u16 tflrfptr; /* PSC + 0x9a */
+ u16 reserved31;
+ u16 tflwfptr; /* PSC + 0x9e */
+};
+
+#define MPC512x_PSC_FIFO_EOF 0x100
+#define MPC512x_PSC_FIFO_RESET_SLICE 0x80
+#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01
+#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04
+
+#define MPC512x_PSC_FIFO_EMPTY 0x1
+#define MPC512x_PSC_FIFO_FULL 0x2
+#define MPC512x_PSC_FIFO_ALARM 0x4
+#define MPC512x_PSC_FIFO_URERR 0x8
+
+struct mpc512x_psc_fifo {
+ u32 reserved1[10];
+ u32 txcmd; /* PSC + 0x80 */
+ u32 txalarm; /* PSC + 0x84 */
+ u32 txsr; /* PSC + 0x88 */
+ u32 txisr; /* PSC + 0x8c */
+ u32 tximr; /* PSC + 0x90 */
+ u32 txcnt; /* PSC + 0x94 */
+ u32 txptr; /* PSC + 0x98 */
+ u32 txsz; /* PSC + 0x9c */
+ u32 reserved2[7];
+ union {
+ u8 txdata_8;
+ u16 txdata_16;
+ u32 txdata_32;
+ } txdata; /* PSC + 0xbc */
+#define txdata_8 txdata.txdata_8
+#define txdata_16 txdata.txdata_16
+#define txdata_32 txdata.txdata_32
+ u32 rxcmd; /* PSC + 0xc0 */
+ u32 rxalarm; /* PSC + 0xc4 */
+ u32 rxsr; /* PSC + 0xc8 */
+ u32 rxisr; /* PSC + 0xcc */
+ u32 rximr; /* PSC + 0xd0 */
+ u32 rxcnt; /* PSC + 0xd4 */
+ u32 rxptr; /* PSC + 0xd8 */
+ u32 rxsz; /* PSC + 0xdc */
+ u32 reserved3[7];
+ union {
+ u8 rxdata_8;
+ u16 rxdata_16;
+ u32 rxdata_32;
+ } rxdata; /* PSC + 0xfc */
+#define rxdata_8 rxdata.rxdata_8
+#define rxdata_16 rxdata.rxdata_16
+#define rxdata_32 rxdata.rxdata_32
+};
+
+struct mpc5125_psc {
+ u8 mr1; /* PSC + 0x00 */
+ u8 reserved0[3];
+ u8 mr2; /* PSC + 0x04 */
+ u8 reserved1[3];
+ struct {
+ u16 status; /* PSC + 0x08 */
+ u8 reserved2[2];
+ u8 clock_select; /* PSC + 0x0c */
+ u8 reserved3[3];
+ } sr_csr;
+ u8 command; /* PSC + 0x10 */
+ u8 reserved4[3];
+ union { /* PSC + 0x14 */
+ u8 buffer_8;
+ u16 buffer_16;
+ u32 buffer_32;
+ } buffer;
+ struct {
+ u8 ipcr; /* PSC + 0x18 */
+ u8 reserved5[3];
+ u8 acr; /* PSC + 0x1c */
+ u8 reserved6[3];
+ } ipcr_acr;
+ struct {
+ u16 isr; /* PSC + 0x20 */
+ u8 reserved7[2];
+ u16 imr; /* PSC + 0x24 */
+ u8 reserved8[2];
+ } isr_imr;
+ u8 ctur; /* PSC + 0x28 */
+ u8 reserved9[3];
+ u8 ctlr; /* PSC + 0x2c */
+ u8 reserved10[3];
+ u32 ccr; /* PSC + 0x30 */
+ u32 ac97slots; /* PSC + 0x34 */
+ u32 ac97cmd; /* PSC + 0x38 */
+ u32 ac97data; /* PSC + 0x3c */
+ u8 reserved11[4];
+ u8 ip; /* PSC + 0x44 */
+ u8 reserved12[3];
+ u8 op1; /* PSC + 0x48 */
+ u8 reserved13[3];
+ u8 op0; /* PSC + 0x4c */
+ u8 reserved14[3];
+ u32 sicr; /* PSC + 0x50 */
+	u8 reserved15[4];	/* pad to sizeof(struct mpc52xx_psc) */
+};
+
+#endif /* __ASM_MPC52xx_PSC_H__ */
diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h
new file mode 100644
index 000000000..44db26380
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc5xxx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007
+ *
+ * Description:
+ * MPC5xxx Prototypes and definitions
+ */
+
+#ifndef __ASM_POWERPC_MPC5xxx_H__
+#define __ASM_POWERPC_MPC5xxx_H__
+
+#include <linux/property.h>
+
+unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode);
+
+static inline unsigned long mpc5xxx_get_bus_frequency(struct device *dev)
+{
+ return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
+}
+
+#endif /* __ASM_POWERPC_MPC5xxx_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc6xx.h b/arch/powerpc/include/asm/mpc6xx.h
new file mode 100644
index 000000000..6ed9f4ccc
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc6xx.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_MPC6xx_H
+#define __ASM_POWERPC_MPC6xx_H
+
+void mpc6xx_enter_standby(void);
+
+#endif
diff --git a/arch/powerpc/include/asm/mpc8260.h b/arch/powerpc/include/asm/mpc8260.h
new file mode 100644
index 000000000..fd8c57074
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc8260.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Since there are many different boards and no standard configuration,
+ * we have a unique include file for each. Rather than changing every
+ * file that has to include the MPC8260 configuration, they all include
+ * this one, and the configuration switching is done here.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_MPC8260_H__
+#define __ASM_POWERPC_MPC8260_H__
+
+#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */
+
+#ifdef CONFIG_8260
+
+#if defined(CONFIG_PQ2ADS) || defined(CONFIG_PQ2FADS)
+#include <platforms/82xx/pq2ads.h>
+#endif
+
+#ifdef CONFIG_PCI_8260
+#include <platforms/82xx/m82xx_pci.h>
+#endif
+
+#endif /* CONFIG_8260 */
+#endif /* !__ASM_POWERPC_MPC8260_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 000000000..21aabc323
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __ASM_PPC_MPC85XX_H
+#define __ASM_PPC_MPC85XX_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SOC design revision */
+#define SVR_MAJ(svr)	(((svr) >> 4) & 0xF)	/* Major revision field */
+#define SVR_MIN(svr)	(((svr) >> 0) & 0xF)	/* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
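+
+/*
+ * Illustrative decode, assuming 'svr' was read from the SVR SPR
+ * (e.g. via mfspr(SPRN_SVR)):
+ *
+ *	if (SVR_SOC_VER(svr) == SVR_P2020)
+ *		...;			// any P2020 revision
+ *	rev = SVR_REV(svr);		// e.g. 0x21 for rev 2.1
+ */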
+
+#define SVR_8533 0x803400
+#define SVR_8535 0x803701
+#define SVR_8536 0x803700
+#define SVR_8540 0x803000
+#define SVR_8541 0x807200
+#define SVR_8543 0x803200
+#define SVR_8544 0x803401
+#define SVR_8545 0x803102
+#define SVR_8547 0x803101
+#define SVR_8548 0x803100
+#define SVR_8555 0x807100
+#define SVR_8560 0x807000
+#define SVR_8567 0x807501
+#define SVR_8568 0x807500
+#define SVR_8569 0x808000
+#define SVR_8572 0x80E000
+#define SVR_P1010 0x80F100
+#define SVR_P1011 0x80E500
+#define SVR_P1012 0x80E501
+#define SVR_P1013 0x80E700
+#define SVR_P1014 0x80F101
+#define SVR_P1017 0x80F700
+#define SVR_P1020 0x80E400
+#define SVR_P1021 0x80E401
+#define SVR_P1022 0x80E600
+#define SVR_P1023 0x80F600
+#define SVR_P1024 0x80E402
+#define SVR_P1025 0x80E403
+#define SVR_P2010 0x80E300
+#define SVR_P2020 0x80E200
+#define SVR_P2040 0x821000
+#define SVR_P2041 0x821001
+#define SVR_P3041 0x821103
+#define SVR_P4040 0x820100
+#define SVR_P4080 0x820000
+#define SVR_P5010 0x822100
+#define SVR_P5020 0x822000
+#define SVR_P5021	0x820500
+#define SVR_P5040 0x820400
+#define SVR_T4240 0x824000
+#define SVR_T4120 0x824001
+#define SVR_T4160 0x824100
+#define SVR_T4080 0x824102
+#define SVR_C291 0x850000
+#define SVR_C292 0x850020
+#define SVR_C293 0x850030
+#define SVR_B4860	0x868000
+#define SVR_G4860 0x868001
+#define SVR_G4060 0x868003
+#define SVR_B4440 0x868100
+#define SVR_G4440 0x868101
+#define SVR_B4420 0x868102
+#define SVR_B4220 0x868103
+#define SVR_T1040 0x852000
+#define SVR_T1041 0x852001
+#define SVR_T1042 0x852002
+#define SVR_T1020 0x852100
+#define SVR_T1021 0x852101
+#define SVR_T1022 0x852102
+#define SVR_T2080 0x853000
+#define SVR_T2081 0x853100
+
+#define SVR_8610 0x80A000
+#define SVR_8641 0x809000
+#define SVR_8641D 0x809001
+
+#define SVR_9130 0x860001
+#define SVR_9131 0x860000
+#define SVR_9132 0x861000
+#define SVR_9232 0x861400
+
+#define SVR_Unknown 0xFFFFFF
+
+#endif
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
new file mode 100644
index 000000000..58353c5bd
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic.h
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+#include <asm/dcr.h>
+#include <asm/msi_bitmap.h>
+
+/*
+ * Global registers
+ */
+
+#define MPIC_GREG_BASE 0x01000
+
+#define MPIC_GREG_FEATURE_0 0x00000
+#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
+#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
+#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
+#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
+#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
+#define MPIC_GREG_FEATURE_1 0x00010
+#define MPIC_GREG_GLOBAL_CONF_0 0x00020
+#define MPIC_GREG_GCONF_RESET 0x80000000
+/* On the FSL mpic implementations the Mode field is expanded to be
+ * 2 bits wide:
+ * 0b00 = pass through (interrupts routed to IRQ0)
+ * 0b01 = Mixed mode
+ * 0b10 = reserved
+ * 0b11 = External proxy / coreint
+ */
+#define MPIC_GREG_GCONF_COREINT 0x60000000
+#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
+#define MPIC_GREG_GCONF_NO_BIAS 0x10000000
+#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
+#define MPIC_GREG_GCONF_MCK 0x08000000
+#define MPIC_GREG_GLOBAL_CONF_1 0x00030
+#define MPIC_GREG_VENDOR_0 0x00040
+#define MPIC_GREG_VENDOR_1 0x00050
+#define MPIC_GREG_VENDOR_2 0x00060
+#define MPIC_GREG_VENDOR_3 0x00070
+#define MPIC_GREG_VENDOR_ID 0x00080
+#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+#define MPIC_GREG_PROCESSOR_INIT 0x00090
+#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
+#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
+#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
+#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
+#define MPIC_GREG_IPI_STRIDE 0x10
+#define MPIC_GREG_SPURIOUS 0x000e0
+#define MPIC_GREG_TIMER_FREQ 0x000f0
+
+/*
+ * Timer registers
+ */
+#define MPIC_TIMER_BASE 0x01100
+#define MPIC_TIMER_STRIDE 0x40
+#define MPIC_TIMER_GROUP_STRIDE 0x1000
+
+#define MPIC_TIMER_CURRENT_CNT 0x00000
+#define MPIC_TIMER_BASE_CNT 0x00010
+#define MPIC_TIMER_VECTOR_PRI 0x00020
+#define MPIC_TIMER_DESTINATION 0x00030
+
+/*
+ * Per-Processor registers
+ */
+
+#define MPIC_CPU_THISBASE 0x00000
+#define MPIC_CPU_BASE 0x20000
+#define MPIC_CPU_STRIDE 0x01000
+
+#define MPIC_CPU_IPI_DISPATCH_0 0x00040
+#define MPIC_CPU_IPI_DISPATCH_1 0x00050
+#define MPIC_CPU_IPI_DISPATCH_2 0x00060
+#define MPIC_CPU_IPI_DISPATCH_3 0x00070
+#define MPIC_CPU_IPI_DISPATCH_STRIDE 0x00010
+#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
+#define MPIC_CPU_TASKPRI_MASK 0x0000000f
+#define MPIC_CPU_WHOAMI 0x00090
+#define MPIC_CPU_WHOAMI_MASK 0x0000001f
+#define MPIC_CPU_INTACK 0x000a0
+#define MPIC_CPU_EOI 0x000b0
+#define MPIC_CPU_MCACK 0x000c0
+
+/*
+ * Per-source registers
+ */
+
+#define MPIC_IRQ_BASE 0x10000
+#define MPIC_IRQ_STRIDE 0x00020
+#define MPIC_IRQ_VECTOR_PRI 0x00000
+#define MPIC_VECPRI_MASK 0x80000000
+#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
+#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
+#define MPIC_VECPRI_PRIORITY_SHIFT 16
+#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
+#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
+#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define MPIC_VECPRI_POLARITY_MASK 0x00800000
+#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
+#define MPIC_VECPRI_SENSE_EDGE 0x00000000
+#define MPIC_VECPRI_SENSE_MASK 0x00400000
+#define MPIC_IRQ_DESTINATION 0x00010
+
+#define MPIC_FSL_BRR1 0x00000
+#define MPIC_FSL_BRR1_VER 0x0000ffff
+
+#define MPIC_MAX_IRQ_SOURCES 2048
+#define MPIC_MAX_CPUS 32
+#define MPIC_MAX_ISU 32
+
+#define MPIC_MAX_ERR 32
+#define MPIC_FSL_ERR_INT 16
+
+/*
+ * Tsi108 implementation of MPIC has many differences from the original one
+ */
+
+/*
+ * Global registers
+ */
+
+#define TSI108_GREG_BASE 0x00000
+#define TSI108_GREG_FEATURE_0 0x00000
+#define TSI108_GREG_GLOBAL_CONF_0 0x00004
+#define TSI108_GREG_VENDOR_ID 0x0000c
+#define TSI108_GREG_IPI_VECTOR_PRI_0 0x00204 /* Doorbell 0 */
+#define TSI108_GREG_IPI_STRIDE 0x0c
+#define TSI108_GREG_SPURIOUS 0x00010
+#define TSI108_GREG_TIMER_FREQ 0x00014
+
+/*
+ * Timer registers
+ */
+#define TSI108_TIMER_BASE 0x0030
+#define TSI108_TIMER_STRIDE 0x10
+#define TSI108_TIMER_CURRENT_CNT 0x00000
+#define TSI108_TIMER_BASE_CNT 0x00004
+#define TSI108_TIMER_VECTOR_PRI 0x00008
+#define TSI108_TIMER_DESTINATION 0x0000c
+
+/*
+ * Per-Processor registers
+ */
+#define TSI108_CPU_BASE 0x00300
+#define TSI108_CPU_STRIDE 0x00040
+#define TSI108_CPU_IPI_DISPATCH_0 0x00200
+#define TSI108_CPU_IPI_DISPATCH_STRIDE 0x00000
+#define TSI108_CPU_CURRENT_TASK_PRI 0x00000
+#define TSI108_CPU_WHOAMI 0xffffffff
+#define TSI108_CPU_INTACK 0x00004
+#define TSI108_CPU_EOI 0x00008
+#define TSI108_CPU_MCACK 0x00004 /* Doesn't really exist here */
+
+/*
+ * Per-source registers
+ */
+#define TSI108_IRQ_BASE 0x00100
+#define TSI108_IRQ_STRIDE 0x00008
+#define TSI108_IRQ_VECTOR_PRI 0x00000
+#define TSI108_VECPRI_VECTOR_MASK 0x000000ff
+#define TSI108_VECPRI_POLARITY_POSITIVE 0x01000000
+#define TSI108_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define TSI108_VECPRI_SENSE_LEVEL 0x02000000
+#define TSI108_VECPRI_SENSE_EDGE 0x00000000
+#define TSI108_VECPRI_POLARITY_MASK 0x01000000
+#define TSI108_VECPRI_SENSE_MASK 0x02000000
+#define TSI108_IRQ_DESTINATION 0x00004
+
+/* weird mpic register indices and mask bits in the HW info array */
+enum {
+ MPIC_IDX_GREG_BASE = 0,
+ MPIC_IDX_GREG_FEATURE_0,
+ MPIC_IDX_GREG_GLOBAL_CONF_0,
+ MPIC_IDX_GREG_VENDOR_ID,
+ MPIC_IDX_GREG_IPI_VECTOR_PRI_0,
+ MPIC_IDX_GREG_IPI_STRIDE,
+ MPIC_IDX_GREG_SPURIOUS,
+ MPIC_IDX_GREG_TIMER_FREQ,
+
+ MPIC_IDX_TIMER_BASE,
+ MPIC_IDX_TIMER_STRIDE,
+ MPIC_IDX_TIMER_CURRENT_CNT,
+ MPIC_IDX_TIMER_BASE_CNT,
+ MPIC_IDX_TIMER_VECTOR_PRI,
+ MPIC_IDX_TIMER_DESTINATION,
+
+ MPIC_IDX_CPU_BASE,
+ MPIC_IDX_CPU_STRIDE,
+ MPIC_IDX_CPU_IPI_DISPATCH_0,
+ MPIC_IDX_CPU_IPI_DISPATCH_STRIDE,
+ MPIC_IDX_CPU_CURRENT_TASK_PRI,
+ MPIC_IDX_CPU_WHOAMI,
+ MPIC_IDX_CPU_INTACK,
+ MPIC_IDX_CPU_EOI,
+ MPIC_IDX_CPU_MCACK,
+
+ MPIC_IDX_IRQ_BASE,
+ MPIC_IDX_IRQ_STRIDE,
+ MPIC_IDX_IRQ_VECTOR_PRI,
+
+ MPIC_IDX_VECPRI_VECTOR_MASK,
+ MPIC_IDX_VECPRI_POLARITY_POSITIVE,
+ MPIC_IDX_VECPRI_POLARITY_NEGATIVE,
+ MPIC_IDX_VECPRI_SENSE_LEVEL,
+ MPIC_IDX_VECPRI_SENSE_EDGE,
+ MPIC_IDX_VECPRI_POLARITY_MASK,
+ MPIC_IDX_VECPRI_SENSE_MASK,
+ MPIC_IDX_IRQ_DESTINATION,
+ MPIC_IDX_END
+};
+
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+/* Fixup table entry */
+struct mpic_irq_fixup
+{
+ u8 __iomem *base;
+ u8 __iomem *applebase;
+ u32 data;
+ unsigned int index;
+};
+#endif /* CONFIG_MPIC_U3_HT_IRQS */
+
+
+enum mpic_reg_type {
+ mpic_access_mmio_le,
+ mpic_access_mmio_be,
+#ifdef CONFIG_PPC_DCR
+ mpic_access_dcr
+#endif
+};
+
+struct mpic_reg_bank {
+ u32 __iomem *base;
+#ifdef CONFIG_PPC_DCR
+ dcr_host_t dhost;
+#endif /* CONFIG_PPC_DCR */
+};
+
+struct mpic_irq_save {
+ u32 vecprio,
+ dest;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ u32 fixup_data;
+#endif
+};
+
+/* The instance data of a given MPIC */
+struct mpic
+{
+ /* The OpenFirmware dt node for this MPIC */
+ struct device_node *node;
+
+ /* The remapper for this MPIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ struct irq_chip hc_ht_irq;
+#endif
+#ifdef CONFIG_SMP
+ struct irq_chip hc_ipi;
+#endif
+ struct irq_chip hc_tm;
+ struct irq_chip hc_err;
+ const char *name;
+ /* Flags */
+ unsigned int flags;
+ /* How many irq sources in a given ISU */
+ unsigned int isu_size;
+ unsigned int isu_shift;
+ unsigned int isu_mask;
+ /* Number of sources */
+ unsigned int num_sources;
+
+ /* vector numbers used for internal sources (ipi/timers) */
+ unsigned int ipi_vecs[4];
+ unsigned int timer_vecs[8];
+ /* vector numbers used for FSL MPIC error interrupts */
+ unsigned int err_int_vecs[MPIC_MAX_ERR];
+
+ /* Spurious vector to program into unused sources */
+ unsigned int spurious_vec;
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ /* The fixup table */
+ struct mpic_irq_fixup *fixups;
+ raw_spinlock_t fixup_lock;
+#endif
+
+ /* Register access method */
+ enum mpic_reg_type reg_type;
+
+ /* The physical base address of the MPIC */
+ phys_addr_t paddr;
+
+ /* The various ioremap'ed bases */
+ struct mpic_reg_bank thiscpuregs;
+ struct mpic_reg_bank gregs;
+ struct mpic_reg_bank tmregs;
+ struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS];
+ struct mpic_reg_bank isus[MPIC_MAX_ISU];
+
+ /* ioremap'ed base for error interrupt registers */
+ u32 __iomem *err_regs;
+
+ /* Protected sources */
+ unsigned long *protected;
+
+#ifdef CONFIG_MPIC_WEIRD
+ /* Pointer to HW info array */
+ u32 *hw_set;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ struct msi_bitmap msi_bitmap;
+#endif
+
+#ifdef CONFIG_MPIC_BROKEN_REGREAD
+ u32 isu_reg0_shadow[MPIC_MAX_IRQ_SOURCES];
+#endif
+
+ /* link */
+ struct mpic *next;
+
+#ifdef CONFIG_PM
+ struct mpic_irq_save *save_data;
+#endif
+};
+
+extern struct bus_type mpic_subsys;
+
+/*
+ * MPIC flags (passed to mpic_alloc)
+ *
+ * The top 4 bits contain an MPIC bhw id that is used to index the
+ * register offsets and some masks when CONFIG_MPIC_WEIRD is set.
+ * Note that not setting any ID (leaving those bits at 0) means a
+ * standard MPIC.
+ */
+
+/*
+ * This is a secondary ("chained") controller; it only uses the CPU0
+ * registers. Primary controllers have IPIs and affinity control.
+ */
+#define MPIC_SECONDARY 0x00000001
+
+/* Set this for a big-endian MPIC */
+#define MPIC_BIG_ENDIAN 0x00000002
+/* Broken U3 MPIC */
+#define MPIC_U3_HT_IRQS 0x00000004
+/* Broken IPI registers (autodetected) */
+#define MPIC_BROKEN_IPI 0x00000008
+/* Spurious vector requires EOI */
+#define MPIC_SPV_EOI 0x00000020
+/* No passthrough disable */
+#define MPIC_NO_PTHROU_DIS 0x00000040
+/* DCR based MPIC */
+#define MPIC_USES_DCR 0x00000080
+/* MPIC has 11-bit vector fields (or larger) */
+#define MPIC_LARGE_VECTORS 0x00000100
+/* Enable delivery of prio 15 interrupts as MCK instead of EE */
+#define MPIC_ENABLE_MCK 0x00000200
+/* Disable bias among target selection, spread interrupts evenly */
+#define MPIC_NO_BIAS 0x00000400
+/* Destination only supports a single CPU at a time */
+#define MPIC_SINGLE_DEST_CPU 0x00001000
+/* Enable CoreInt delivery of interrupts */
+#define MPIC_ENABLE_COREINT 0x00002000
+/* Do not reset the MPIC during initialization */
+#define MPIC_NO_RESET 0x00004000
+/* Freescale MPIC (compatible includes "fsl,mpic") */
+#define MPIC_FSL 0x00008000
+/* Freescale MPIC supports EIMR (error interrupt mask register).
+ * This flag is set for MPIC version >= 4.1 (version determined
+ * from the BRR1 register).
+ */
+#define MPIC_FSL_HAS_EIMR 0x00010000
+
+/* MPIC HW modification ID */
+#define MPIC_REGSET_MASK 0xf0000000
+#define MPIC_REGSET(val)		(((val) & 0xf) << 28)
+#define MPIC_GET_REGSET(flags) (((flags) >> 28) & 0xf)
+
+#define MPIC_REGSET_STANDARD MPIC_REGSET(0) /* Original MPIC */
+#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
+
+/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
+extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+ return 0;
+}
+#endif
+
+/* Allocate the controller structure and set up the linux irq descs
+ * for the range of interrupts passed in. No HW initialization is
+ * actually performed.
+ *
+ * @node: device node of the MPIC
+ * @phys_addr: physical base address of the MPIC
+ * @flags: flags, see constants above
+ * @isu_size: number of interrupts in an ISU. Use 0 to use a
+ *            standard ISU-less setup (aka powermac)
+ * @irq_count: number of irq sources to use with this mpic. Pass 0
+ *	       to match the number of sources
+ * @name: name of the controller
+ *
+ * Note about senses: when no sense information is available, all
+ * interrupts are set up to be level negative, unless MPIC_U3_HT_IRQS
+ * is set, in which case they are edge positive.
+ */
+extern struct mpic *mpic_alloc(struct device_node *node,
+ phys_addr_t phys_addr,
+ unsigned int flags,
+ unsigned int isu_size,
+ unsigned int irq_count,
+ const char *name);
+
+/* Assign ISUs, to be called before mpic_init()
+ *
+ * @mpic: controller structure as returned by mpic_alloc()
+ * @isu_num: ISU number
+ * @phys_addr: physical address of the ISU
+ */
+extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+ phys_addr_t phys_addr);
+
+
+/* Initialize the controller. After this has been called, none of the above
+ * should be called again for this mpic
+ */
+extern void mpic_init(struct mpic *mpic);
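+
+/*
+ * Typical bring-up order, sketched with illustrative arguments ('np'
+ * and 'r' are a device node and resource the caller already found):
+ *
+ *	mpic = mpic_alloc(np, r.start, MPIC_BIG_ENDIAN, 0, 0, " MPIC ");
+ *	mpic_assign_isu(mpic, 0, isu_paddr);	// only for ISU-full setups
+ *	mpic_init(mpic);
+ */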
+
+/*
+ * All of the following functions must only be used after the
+ * ISUs have been assigned and the controller fully initialized
+ * with mpic_init()
+ */
+
+
+/* Change the priority of an interrupt. Default is 8 for irqs and
+ * 10 for IPIs. This can be called with both IPI and IRQ numbers; for
+ * an IPI, pass the Linux irq number that the IPI is mapped to.
+ */
+extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
+
+/* Setup a non-boot CPU */
+extern void mpic_setup_this_cpu(void);
+
+/* Clean up for kexec (or cpu offline or ...) */
+extern void mpic_teardown_this_cpu(int secondary);
+
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
+/* Request IPIs on primary mpic */
+void __init mpic_request_ipis(void);
+
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
+
+/* Unmask a specific virq */
+extern void mpic_unmask_irq(struct irq_data *d);
+/* Mask a specific virq */
+extern void mpic_mask_irq(struct irq_data *d);
+/* EOI a specific virq */
+extern void mpic_end_irq(struct irq_data *d);
+
+/* Fetch interrupt from a given mpic */
+extern unsigned int mpic_get_one_irq(struct mpic *mpic);
+/* This one gets from the primary mpic */
+extern unsigned int mpic_get_irq(void);
+/* This one gets from the primary mpic via CoreInt */
+extern unsigned int mpic_get_coreint_irq(void);
+/* Fetch Machine Check interrupt from primary mpic */
+extern unsigned int mpic_get_mcirq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
new file mode 100644
index 000000000..cd25eeced
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
+ */
+
+#ifndef _ASM_MPIC_MSGR_H
+#define _ASM_MPIC_MSGR_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/smp.h>
+#include <asm/io.h>
+
+struct mpic_msgr {
+ u32 __iomem *base;
+ u32 __iomem *mer;
+ int irq;
+ unsigned char in_use;
+ raw_spinlock_t lock;
+ int num;
+};
+
+/* Get a message register
+ *
+ * @reg_num: the MPIC message register to get
+ *
+ * A pointer to the message register is returned. If
+ * the message register asked for is already in use, then
+ * EBUSY is returned. If the number given is not associated
+ * with an actual message register, then ENODEV is returned.
+ * Successfully getting the register marks it as in use.
+ */
+extern struct mpic_msgr *mpic_msgr_get(unsigned int reg_num);
+
+/* Relinquish a message register
+ *
+ * @msgr: the message register to return
+ *
+ * Disables the given message register and marks it as free.
+ * After this call has completed successully the message
+ * register is available to be acquired by a call to
+ * mpic_msgr_get.
+ */
+extern void mpic_msgr_put(struct mpic_msgr *msgr);
+
+/* Enable a message register
+ *
+ * @msgr: the message register to enable
+ *
+ * The given message register is enabled for sending
+ * messages.
+ */
+extern void mpic_msgr_enable(struct mpic_msgr *msgr);
+
+/* Disable a message register
+ *
+ * @msgr: the message register to disable
+ *
+ * The given message register is disabled for sending
+ * messages.
+ */
+extern void mpic_msgr_disable(struct mpic_msgr *msgr);
+
+/* Write a message to a message register
+ *
+ * @msgr: the message register to write to
+ * @message: the message to write
+ *
+ * The given 32-bit message is written to the given message
+ * register. Writing to an enabled message register fires
+ * an interrupt.
+ */
+static inline void mpic_msgr_write(struct mpic_msgr *msgr, u32 message)
+{
+ out_be32(msgr->base, message);
+}
+
+/* Read a message from a message register
+ *
+ * @msgr: the message register to read from
+ *
+ * Returns the 32-bit value currently in the given message register.
+ * Upon reading the register any interrupts for that register are
+ * cleared.
+ */
+static inline u32 mpic_msgr_read(struct mpic_msgr *msgr)
+{
+ return in_be32(msgr->base);
+}
+
+/* Clear a message register
+ *
+ * @msgr: the message register to clear
+ *
+ * Clears any interrupts associated with the given message register.
+ */
+static inline void mpic_msgr_clear(struct mpic_msgr *msgr)
+{
+ (void) mpic_msgr_read(msgr);
+}
+
+/* Set the destination CPU for the message register
+ *
+ * @msgr: the message register whose destination is to be set
+ * @cpu_num: the Linux CPU number to bind the message register to
+ *
+ * Note that the CPU number given is the CPU number used by the kernel
+ * and *not* the actual hardware CPU number.
+ */
+static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
+ u32 cpu_num)
+{
+ out_be32(msgr->base, 1 << get_hard_smp_processor_id(cpu_num));
+}
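+
+/*
+ * End-to-end sketch for the sending side (error handling elided; per
+ * the mpic_msgr_get() comment above, failure is reported with
+ * EBUSY/ENODEV, so the check below is illustrative):
+ *
+ *	struct mpic_msgr *msgr = mpic_msgr_get(0);
+ *
+ *	if (!IS_ERR_OR_NULL(msgr)) {
+ *		mpic_msgr_set_destination(msgr, cpu);
+ *		mpic_msgr_enable(msgr);
+ *		mpic_msgr_write(msgr, cookie);
+ *	}
+ */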
+
+/* Get the IRQ number for the message register
+ * @msgr: the message register whose IRQ is to be returned
+ *
+ * Returns the IRQ number associated with the given message register.
+ * 0 is returned if this message register is not capable of receiving
+ * interrupts. Which message registers can and cannot receive interrupts is
+ * specified in the device tree for the system.
+ */
+static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
+{
+ return msgr->irq;
+}
+
+#endif
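
A minimal usage sketch of the API above (illustrative only, not part of the patch): it assumes the EBUSY/ENODEV failures described in the mpic_msgr_get comment are encoded as ERR_PTR() values, and the example_ names are invented.

#include <linux/err.h>
#include <asm/mpic_msgr.h>

/* Hypothetical example: acquire message register 0, aim it at a CPU,
 * send one 32-bit value, then release the register. */
static int example_msgr_send(u32 value, unsigned int target_cpu)
{
	struct mpic_msgr *msgr = mpic_msgr_get(0);

	if (IS_ERR(msgr))		/* assumed ERR_PTR encoding of EBUSY/ENODEV */
		return PTR_ERR(msgr);

	mpic_msgr_set_destination(msgr, target_cpu);
	mpic_msgr_enable(msgr);
	mpic_msgr_write(msgr, value);	/* writing an enabled register fires an interrupt */
	mpic_msgr_disable(msgr);
	mpic_msgr_put(msgr);
	return 0;
}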
diff --git a/arch/powerpc/include/asm/mpic_timer.h b/arch/powerpc/include/asm/mpic_timer.h
new file mode 100644
index 000000000..d33e4149b
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_timer.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * arch/powerpc/include/asm/mpic_timer.h
+ *
+ * Header file for Mpic Global Timer
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Wang Dongsheng <Dongsheng.Wang@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ */
+
+#ifndef __MPIC_TIMER__
+#define __MPIC_TIMER__
+
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+struct mpic_timer {
+ void *dev;
+ struct cascade_priv *cascade_handle;
+ unsigned int num;
+ unsigned int irq;
+};
+
+#ifdef CONFIG_MPIC_TIMER
+struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
+ time64_t time);
+void mpic_start_timer(struct mpic_timer *handle);
+void mpic_stop_timer(struct mpic_timer *handle);
+void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time);
+void mpic_free_timer(struct mpic_timer *handle);
+#else
+struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
+ time64_t time) { return NULL; }
+void mpic_start_timer(struct mpic_timer *handle) { }
+void mpic_stop_timer(struct mpic_timer *handle) { }
+void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time) { }
+void mpic_free_timer(struct mpic_timer *handle) { }
+#endif
+
+#endif
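
A usage sketch for the timer API above (illustrative; the example_ names are invented, the 5-second expiry is arbitrary, and NULL-on-failure is assumed from the !CONFIG_MPIC_TIMER stubs).

#include <linux/interrupt.h>
#include <asm/mpic_timer.h>

static irqreturn_t example_timer_fn(int irq, void *dev)
{
	/* runs when the global timer expires */
	return IRQ_HANDLED;
}

static struct mpic_timer *example_arm_timer(void *dev)
{
	/* request a timer that expires after 5 seconds, then start it */
	struct mpic_timer *t = mpic_request_timer(example_timer_fn, dev, 5);

	if (t)
		mpic_start_timer(t);
	return t;
}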
diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h
new file mode 100644
index 000000000..55c2f7db9
--- /dev/null
+++ b/arch/powerpc/include/asm/msi_bitmap.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _POWERPC_SYSDEV_MSI_BITMAP_H
+#define _POWERPC_SYSDEV_MSI_BITMAP_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ */
+
+#include <linux/of.h>
+#include <asm/irq.h>
+
+struct msi_bitmap {
+ struct device_node *of_node;
+ unsigned long *bitmap;
+ spinlock_t lock;
+ unsigned int irq_count;
+ bool bitmap_from_slab;
+};
+
+int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
+void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
+ unsigned int num);
+void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq);
+
+int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp);
+
+int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
+ struct device_node *of_node);
+void msi_bitmap_free(struct msi_bitmap *bmp);
+
+#endif /* _POWERPC_SYSDEV_MSI_BITMAP_H */
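
A usage sketch (illustrative only; the 256/4 sizes are arbitrary and the usual kernel conventions of 0/negative-errno returns are assumed): allocate a bitmap, grab a contiguous block of hardware IRQs, and tear everything down.

#include <asm/msi_bitmap.h>

static int example_msi_setup(struct device_node *node)
{
	struct msi_bitmap bmp;
	int hwirq, rc;

	rc = msi_bitmap_alloc(&bmp, 256, node);	/* room for 256 MSIs */
	if (rc)
		return rc;

	hwirq = msi_bitmap_alloc_hwirqs(&bmp, 4);	/* contiguous block of 4 */
	if (hwirq < 0) {
		msi_bitmap_free(&bmp);
		return hwirq;
	}

	msi_bitmap_free_hwirqs(&bmp, hwirq, 4);
	msi_bitmap_free(&bmp);
	return 0;
}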
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
new file mode 100644
index 000000000..c3c7adef7
--- /dev/null
+++ b/arch/powerpc/include/asm/nmi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_NMI_H
+#define _ASM_NMI_H
+
+#ifdef CONFIG_PPC_WATCHDOG
+extern void arch_touch_nmi_watchdog(void);
+long soft_nmi_interrupt(struct pt_regs *regs);
+void watchdog_nmi_set_timeout_pct(u64 pct);
+#else
+static inline void arch_touch_nmi_watchdog(void) {}
+static inline void watchdog_nmi_set_timeout_pct(u64 pct) {}
+#endif
+
+#ifdef CONFIG_NMI_IPI
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
+extern void hv_nmi_check_nonrecoverable(struct pt_regs *regs);
+
+#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
new file mode 100644
index 000000000..de092b04e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H
+#define _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H
+
+#define PAGE_SHIFT_8M 23
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+
+ return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return PAGE_SHIFT_8M;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
+
+ return hugepd_page(hpd) + idx;
+}
+
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ return shift_to_mmu_psize(shift);
+}
+
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 1);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+ unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+ pte_update(mm, addr, ptep, clr, set, 1);
+}
+
+#ifdef CONFIG_PPC_4K_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+{
+ size_t size = 1UL << shift;
+
+ if (size == SZ_16K)
+ return __pte(pte_val(entry) | _PAGE_SPS);
+ else
+ return __pte(pte_val(entry) | _PAGE_SPS | _PAGE_HUGE);
+}
+#define arch_make_huge_pte arch_make_huge_pte
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
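
The clr/set derivation in huge_ptep_set_wrprotect() above is worth unpacking: feeding pte_wrprotect() an all-ones PTE reveals which bits it clears, and feeding it zero reveals which bits it sets, so the same code works whether a platform clears a write bit or sets a read-only field. A standalone sketch with invented flag values:

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define RW_BIT 0x100UL	/* variant A: wrprotect clears a write bit */
#define RO_BIT 0x600UL	/* variant B: wrprotect sets a read-only field */

static unsigned long wrprotect_a(unsigned long v) { return v & ~RW_BIT; }
static unsigned long wrprotect_b(unsigned long v) { return v | RO_BIT; }

int main(void)
{
	/* all-ones input exposes the cleared bits; zero exposes the set bits */
	printf("A: clr=%#lx set=%#lx\n", ~wrprotect_a(~0UL), wrprotect_a(0)); /* clr=0x100 set=0 */
	printf("B: clr=%#lx set=%#lx\n", ~wrprotect_b(~0UL), wrprotect_b(0)); /* clr=0 set=0x600 */
	return 0;
}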
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
new file mode 100644
index 000000000..c44d97751
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_8XX_H_
+#define _ASM_POWERPC_KUP_8XX_H_
+
+#include <asm/bug.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC_KUAP
+
+#ifndef __ASSEMBLY__
+
+#include <linux/jump_label.h>
+
+#include <asm/reg.h>
+
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_MD_AP);
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ mtspr(SPRN_MD_AP, regs->kuap);
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap;
+
+ kuap = mfspr(SPRN_MD_AP);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
+
+ return kuap;
+}
+
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ mtspr(SPRN_MD_AP, MD_APG_INIT);
+}
+
+static inline void __prevent_user_access(unsigned long dir)
+{
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+
+static inline unsigned long __prevent_user_access_return(void)
+{
+ unsigned long flags;
+
+ flags = mfspr(SPRN_MD_AP);
+
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+
+ return flags;
+}
+
+static inline void __restore_user_access(unsigned long flags)
+{
+ mtspr(SPRN_MD_AP, flags);
+}
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* _ASM_POWERPC_KUP_8XX_H_ */
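
A sketch of how the save/close/reopen helpers above pair up (illustrative only; the real callers are the generic allow_user_access()/prevent_user_access() wrappers, and the example_ name is invented):

/* Hypothetical caller: temporarily forbid user accesses around a
 * critical section, then restore whatever state MD_AP held before. */
static inline void example_no_user_access_section(void)
{
	unsigned long flags = __prevent_user_access_return();	/* save and close */

	/* ... work that must not touch user memory ... */

	__restore_user_access(flags);				/* reopen as saved */
}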
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
new file mode 100644
index 000000000..8a8f13a22
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_40X_H_
+#define _ASM_POWERPC_MMU_40X_H_
+
+/*
+ * PPC40x support
+ */
+
+#define PPC40X_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion. On all architectures, the data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the 4xx-specific tlbre, tlbwr, and tlbsx
+ * instructions.
+ */
+
+#define TLB_LO 1
+#define TLB_HI 0
+
+#define TLB_DATA TLB_LO
+#define TLB_TAG TLB_HI
+
+/* Tag portion */
+
+#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
+
+/* Data portion */
+
+#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned int id;
+ unsigned int active;
+ void __user *vdso;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define mmu_virtual_psize MMU_PAGE_4K
+#define mmu_linear_psize MMU_PAGE_256M
+
+#endif /* _ASM_POWERPC_MMU_40X_H_ */
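
A standalone sketch decoding a 40x tag word with the masks above (the example address and page size are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define TLB_EPN_MASK    0xFFFFFC00u	/* constants copied from the header */
#define TLB_PAGESZ_MASK 0x00000380u
#define TLB_VALID       0x00000040u

int main(void)
{
	/* a valid 16K entry (PAGESZ_16K == 2) at effective address 0xC0000000 */
	uint32_t tag = 0xC0000000u | (2u << 7) | TLB_VALID;

	printf("epn=%#x pagesz=%u valid=%u\n",
	       tag & TLB_EPN_MASK, (tag & TLB_PAGESZ_MASK) >> 7,
	       !!(tag & TLB_VALID));
	return 0;
}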
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
new file mode 100644
index 000000000..2d92a39d8
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_44X_H_
+#define _ASM_POWERPC_MMU_44X_H_
+/*
+ * PPC440 support
+ */
+
+#include <asm/asm-const.h>
+
+#define PPC44x_MMUCR_TID 0x000000ff
+#define PPC44x_MMUCR_STS 0x00010000
+
+#define PPC44x_TLB_PAGEID 0
+#define PPC44x_TLB_XLAT 1
+#define PPC44x_TLB_ATTRIB 2
+
+/* Page identification fields */
+#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */
+#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */
+#define PPC44x_TLB_TS 0x00000100 /* Translation address space */
+#define PPC44x_TLB_1K 0x00000000 /* Page sizes */
+#define PPC44x_TLB_4K 0x00000010
+#define PPC44x_TLB_16K 0x00000020
+#define PPC44x_TLB_64K 0x00000030
+#define PPC44x_TLB_256K 0x00000040
+#define PPC44x_TLB_1M 0x00000050
+#define PPC44x_TLB_16M 0x00000070
+#define PPC44x_TLB_256M 0x00000090
+
+/* Translation fields */
+#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */
+#define PPC44x_TLB_ERPN_MASK 0x0000000f
+
+/* Storage attribute and access control fields */
+#define PPC44x_TLB_ATTR_MASK 0x0000ff80
+#define PPC44x_TLB_U0 0x00008000 /* User 0 */
+#define PPC44x_TLB_U1 0x00004000 /* User 1 */
+#define PPC44x_TLB_U2 0x00002000 /* User 2 */
+#define PPC44x_TLB_U3 0x00001000 /* User 3 */
+#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */
+#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
+#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
+#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
+#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
+
+#define PPC44x_TLB_PERM_MASK 0x0000003f
+#define PPC44x_TLB_UX 0x00000020 /* User execution */
+#define PPC44x_TLB_UW 0x00000010 /* User write */
+#define PPC44x_TLB_UR 0x00000008 /* User read */
+#define PPC44x_TLB_SX 0x00000004 /* Super execution */
+#define PPC44x_TLB_SW 0x00000002 /* Super write */
+#define PPC44x_TLB_SR 0x00000001 /* Super read */
+
+/* Number of TLB entries */
+#define PPC44x_TLB_SIZE 64
+
+/* 47x bits */
+#define PPC47x_MMUCR_TID 0x0000ffff
+#define PPC47x_MMUCR_STS 0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */
+#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */
+#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */
+#define PPC47x_TLB0_4K 0x00000000
+#define PPC47x_TLB0_16K 0x00000010
+#define PPC47x_TLB0_64K 0x00000030
+#define PPC47x_TLB0_1M 0x00000070
+#define PPC47x_TLB0_16M 0x000000f0
+#define PPC47x_TLB0_256M 0x000001f0
+#define PPC47x_TLB0_1G 0x000003f0
+#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK 0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK 0x0003ff80
+#define PPC47x_TLB2_IL1I 0x00020000 /* Memory is guarded */
+#define PPC47x_TLB2_IL1D 0x00010000 /* Memory is guarded */
+#define PPC47x_TLB2_U0 0x00008000 /* User 0 */
+#define PPC47x_TLB2_U1 0x00004000 /* User 1 */
+#define PPC47x_TLB2_U2 0x00002000 /* User 2 */
+#define PPC47x_TLB2_U3 0x00001000 /* User 3 */
+#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */
+#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */
+#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */
+#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */
+#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK 0x0000003f
+#define PPC47x_TLB2_UX 0x00000020 /* User execution */
+#define PPC47x_TLB2_UW 0x00000010 /* User write */
+#define PPC47x_TLB2_UR 0x00000008 /* User read */
+#define PPC47x_TLB2_SX 0x00000004 /* Super execution */
+#define PPC47x_TLB2_SW 0x00000002 /* Super write */
+#define PPC47x_TLB2_SR 0x00000001 /* Super read */
+#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
+#ifndef __ASSEMBLY__
+
+extern unsigned int tlb_44x_hwater;
+extern unsigned int tlb_44x_index;
+
+typedef struct {
+ unsigned int id;
+ unsigned int active;
+ void __user *vdso;
+} mm_context_t;
+
+/* patch sites */
+extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_44x
+#define PPC44x_EARLY_TLBS 1
+#else
+#define PPC44x_EARLY_TLBS 2
+#define PPC44x_EARLY_DEBUG_VIRTADDR (ASM_CONST(0xf0000000) \
+ | (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff))
+#endif
+
+/* Size of the TLBs used for pinning in lowmem */
+#define PPC_PIN_SIZE (1 << 28) /* 256M */
+
+#if defined(CONFIG_PPC_4K_PAGES)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
+#define mmu_virtual_psize MMU_PAGE_4K
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
+#define mmu_virtual_psize MMU_PAGE_16K
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
+#define mmu_virtual_psize MMU_PAGE_64K
+#elif defined(CONFIG_PPC_256K_PAGES)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
+#define mmu_virtual_psize MMU_PAGE_256K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define mmu_linear_psize MMU_PAGE_256M
+
+#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT)
+
+#endif /* _ASM_POWERPC_MMU_44X_H_ */
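
A standalone sketch assembling the three words of a 256M supervisor-RWX entry from the fields above, roughly the shape early pinning code produces (the addresses are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define PPC44x_TLB_EPN_MASK 0xfffffc00u	/* constants copied from the header */
#define PPC44x_TLB_VALID    0x00000200u
#define PPC44x_TLB_256M     0x00000090u
#define PPC44x_TLB_RPN_MASK 0xfffffc00u
#define PPC44x_TLB_SX       0x00000004u
#define PPC44x_TLB_SW       0x00000002u
#define PPC44x_TLB_SR       0x00000001u

int main(void)
{
	/* word 0 (PAGEID): effective address, valid bit, 256M page size */
	uint32_t w0 = (0xc0000000u & PPC44x_TLB_EPN_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_256M;
	/* word 1 (XLAT): real address; ERPN left at 0 for sub-4GB memory */
	uint32_t w1 = 0x00000000u & PPC44x_TLB_RPN_MASK;
	/* word 2 (ATTRIB): supervisor read/write/execute */
	uint32_t w2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;

	printf("pageid=%#x xlat=%#x attrib=%#x\n", w0, w1, w2);
	return 0;
}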
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
new file mode 100644
index 000000000..0e93a4728
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_8XX_H_
+#define _ASM_POWERPC_MMU_8XX_H_
+/*
+ * PPC8xx support
+ */
+
+/* Control/status registers for the MPC8xx.
+ * A write operation to these registers causes serialized access.
+ * During software tablewalk, the registers used perform mask/shift-add
+ * operations when written/read. A TLB entry is created when the Mx_RPN
+ * is written, and the contents of several registers are used to
+ * create the entry.
+ */
+#define SPRN_MI_CTR 784 /* Instruction TLB control register */
+#define MI_GPM 0x80000000 /* Set domain manager mode */
+#define MI_PPM 0x40000000 /* Set subpage protection */
+#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MI_AP 786
+#define MI_Ks 0x80000000 /* Should not be set */
+#define MI_Kp 0x40000000 /* Should always be set */
+
+/*
+ * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
+ * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
+ * respectively NA for All or X for Supervisor and no access for User.
+ * Then we use the APG to say whether accesses are according to Page rules or
+ * "all Supervisor" rules (Access to all)
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
+ * "all User" rules, that will lead to NA for all.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed as user, in accordance with the page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed as user, in accordance with the page definition)
+ * 3 => User+Accessed => 10 (all accesses performed according to the swapped page definition) for KUEP
+ * 4-15 => Not Used
+ */
+#define MI_APG_INIT 0xde000000
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MI_EPN 787
+#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MI_EVALID 0x00000200 /* Entry is valid */
+#define MI_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define SPRN_MI_TWC 789
+#define MI_APG 0x000001e0 /* Access protection group (0) */
+#define MI_GUARDED 0x00000010 /* Guarded storage */
+#define MI_PSMASK 0x0000000c /* Mask of page size bits */
+#define MI_PS8MEG 0x0000000c /* 8M page size */
+#define MI_PS512K 0x00000004 /* 512K page size */
+#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MI_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define SPRN_MI_RPN 790
+#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization. This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT 0x000001fd
+
+#define SPRN_MD_CTR 792 /* Data TLB control register */
+#define MD_GPM 0x80000000 /* Set domain manager mode */
+#define MD_PPM 0x40000000 /* Set subpage protection */
+#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
+#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
+#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
+
+#define SPRN_M_CASID 793 /* Address space ID (context) to match */
+#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MD_AP 794
+#define MD_Ks 0x80000000 /* Should not be set */
+#define MD_Kp 0x40000000 /* Should always be set */
+
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT 0xdc000000
+#define MD_APG_KUAP 0xde000000
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MD_EPN 795
+#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MD_EVALID 0x00000200 /* Entry is valid */
+#define MD_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define SPRN_M_TWB 796
+#define M_L1TB 0xfffff000 /* Level 1 table base address */
+#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written. It also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define SPRN_MD_TWC 797
+#define MD_L2TB 0xfffff000 /* Level 2 table base address */
+#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
+#define MD_APG 0x000001e0 /* Access protection group (0) */
+#define MD_GUARDED 0x00000010 /* Guarded storage */
+#define MD_PSMASK 0x0000000c /* Mask of page size bits */
+#define MD_PS8MEG 0x0000000c /* 8M page size */
+#define MD_PS512K 0x00000004 /* 512K page size */
+#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MD_WT 0x00000002 /* Use writethrough page attribute */
+#define MD_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define SPRN_MD_RPN 798
+#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define SPRN_M_TW 799
+
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_4K
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_16K
+#define PTE_FRAG_NR 4
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (1UL << 12)
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define mmu_linear_psize MMU_PAGE_8M
+
+#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
+#define MODULES_END PAGE_OFFSET
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+#include <linux/sizes.h>
+
+void mmu_pin_tlb(unsigned long top, bool readonly);
+
+typedef struct {
+ unsigned int id;
+ unsigned int active;
+ void __user *vdso;
+ void *pte_frag;
+} mm_context_t;
+
+#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * enc : is the PTE encoding mask
+ *
+ */
+struct mmu_psize_def {
+ unsigned int shift; /* number of bits */
+ unsigned int enc; /* PTE encoding */
+ unsigned int ind; /* Corresponding indirect page size shift */
+ unsigned int flags;
+#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+ unsigned int max_page_shift, unsigned long size)
+{
+ if (end - addr < size)
+ return false;
+
+ if ((1UL << max_page_shift) < size)
+ return false;
+
+ if (!IS_ALIGNED(addr, size))
+ return false;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+ return SZ_512K;
+ if (PAGE_SIZE == SZ_16K)
+ return SZ_16K;
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+ return SZ_16K;
+ return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= SZ_512K)
+ return 19;
+ else if (size >= SZ_16K)
+ return 14;
+ else
+ return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
+/* patch sites */
+extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
+extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_8XX_H_ */
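
The vmap helpers above implement a simple size policy: 512K and 16K are the only large PTE-level leaves the 8xx supports. A standalone sketch of arch_vmap_pte_supported_shift()'s decision, assuming 4K base pages:

#include <stdio.h>

/* Mirrors arch_vmap_pte_supported_shift() above; 12 assumes a 4K PAGE_SHIFT. */
static int vmap_shift(unsigned long size)
{
	if (size >= 512 * 1024)
		return 19;	/* 512K leaf */
	if (size >= 16 * 1024)
		return 14;	/* 16K leaf */
	return 12;		/* fall back to the base page size */
}

int main(void)
{
	/* prints "19 14 12" */
	printf("%d %d %d\n", vmap_shift(1UL << 20), vmap_shift(32UL << 10),
	       vmap_shift(8UL << 10));
	return 0;
}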
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
new file mode 100644
index 000000000..11eac371e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGALLOC_32_H
+#define _ASM_POWERPC_PGALLOC_32_H
+
+#include <linux/threads.h>
+#include <linux/slab.h>
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
+/* #define pgd_populate(mm, pmd, pte) BUG() */
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+ pte_t *pte)
+{
+ if (IS_ENABLED(CONFIG_BOOKE))
+ *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+ else
+ *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pte_page)
+{
+ if (IS_ENABLED(CONFIG_BOOKE))
+ *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
+ else
+ *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
+}
+
+#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
new file mode 100644
index 000000000..0d40b3318
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table. The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+ (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#include <asm/fixmap.h>
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
+#else
+#define IOREMAP_TOP FIXADDR_START
+#endif
+
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from IOREMAP_TOP running into the VM area allocations (growing upwards
+ * from VMALLOC_START). For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system. This really does become a problem for machines with good amounts
+ * of RAM. -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define VMALLOC_END ioremap_bot
+#endif
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#if defined(CONFIG_40x)
+#include <asm/nohash/32/pte-40x.h>
+#elif defined(CONFIG_44x)
+#include <asm/nohash/32/pte-44x.h>
+#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-e500.h>
+#elif defined(CONFIG_PPC_85xx)
+#include <asm/nohash/32/pte-85xx.h>
+#elif defined(CONFIG_PPC_8xx)
+#include <asm/nohash/32/pte-8xx.h>
+#endif
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as _PAGE_SHIFT here (ie, naturally aligned).
+ * Platforms that differ pre-define the value themselves, so we don't override it here.
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+#endif
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#else
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+#ifndef __ASSEMBLY__
+
+#define pte_clear(mm, addr, ptep) \
+ do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
+
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+#endif
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+#ifndef pte_wrprotect
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+#endif
+
+#ifndef pte_mkexec
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+#endif
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
+#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ *pmdp = __pmd(0);
+}
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ *
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
+ */
+#ifdef CONFIG_PPC_8xx
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+static int hugepd_ok(hugepd_t hpd);
+
+static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
+{
+ if (!huge)
+ return PAGE_SIZE / SZ_4K;
+ else if (hugepd_ok(*((hugepd_t *)pmd)))
+ return 1;
+ else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
+ return SZ_16K / SZ_4K;
+ else
+ return SZ_512K / SZ_4K;
+}
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t *entry = (pte_basic_t *)p;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_off(mm, addr);
+
+ num = number_of_cells_per_pte(pmd, new, huge);
+
+ for (i = 0; i < num; i++, entry++, new += SZ_4K)
+ *entry = new;
+
+ return old;
+}
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ pte_basic_t val = READ_ONCE(ptep->pte);
+ pte_t pte = {val, val, val, val};
+
+ return pte;
+}
+#endif /* CONFIG_PPC_16K_PAGES */
+
+#else
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ *p = __pte(new);
+
+#ifdef CONFIG_44x
+ if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
+ icache_44x_need_flush = 1;
+#endif
+ return old;
+}
+#endif
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#ifndef ptep_set_wrprotect
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+#endif
+
+#ifndef __ptep_set_access_flags
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, 0, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#endif
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page. The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler). On everything else the pmd contains the physical address
+ * of the pte page. -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
+#endif
+
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit.
+ * -- paulus
+ */
+#define __swp_type(entry) ((entry).val & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
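
A standalone sketch of the swap-entry packing defined above: 5 bits of type, the remainder offset, with the whole value shifted up by 3 in the PTE so the low protection bits stay clear (the example type/offset values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long type = 0x11, offset = 0x1234;

	unsigned long val  = type | (offset << 5);	/* __swp_entry() */
	unsigned long pte  = val << 3;			/* __swp_entry_to_pte() */
	unsigned long back = pte >> 3;			/* __pte_to_swp_entry() */

	/* prints "type=0x11 offset=0x1234" */
	printf("type=%#lx offset=%#lx\n", back & 0x1f, back >> 5);
	return 0;
}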
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
new file mode 100644
index 000000000..acf61242e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#ifdef __KERNEL__
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ *
+ * There are several potential gotchas here. The 40x hardware TLBLO
+ * field looks like this:
+ *
+ * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN..................... 0 0 EX WR ZSEL....... W I M G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ * support down to 1k pages), this is done in the TLBMiss exception
+ * handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
+ * miss handler. Bit 27 is PAGE_USER, thus selecting the correct
+ * zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ * entries use the top 30 bits. Because 40x doesn't support SMP
+ * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
+ * is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ * software PTE bits. We actually use bits 21, 24, 25, and
+ * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ * PRESENT.
+ */
+
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_SPECIAL 0x020 /* software: Special page */
+#define _PAGE_DIRTY 0x080 /* software: dirty page */
+#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
+#define _PAGE_EXEC 0x200 /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+/* cache related flags non existing on 40x */
+#define _PAGE_COHERENT 0
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
+#define _PMD_PRESENT_MASK _PMD_PRESENT
+#define _PMD_BAD 0x802
+#define _PMD_SIZE_4M 0x0c0
+#define _PMD_SIZE_16M 0x0e0
+#define _PMD_USER 0
+
+#define _PTE_NONE_MASK 0
+
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
new file mode 100644
index 000000000..78bc304f7
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing. I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN. This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * This isn't entirely true anymore, at least some bits are now
+ * easier to move into the TLB from the PTE. -BenH.
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ * TLB1:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN................................. - - - - - - ERPN.......
+ *
+ * TLB2:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
+ *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attribute fields. Those are:
+ *
+ * TLB2:
+ * 0...10 11 12 13 14 15 16...31
+ * no change WL1 IL1I IL1D IL2I IL2D no change
+ *
+ * There are some constraints and options in deciding how to map software
+ * bits into the TLB entry.
+ *
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ * because it doesn't support SMP. However, some later 460 variants
+ * have -some- form of SMP support and so I keep the bit there for
+ * future use
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
+ * above bits. Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3-24th LSB are shared with swp_entry, however the three lowest bits
+ * (0-2) still hold protection values. That means the three protection
+ * bits are reserved in both the PTE and the swap entry at the three
+ * least significant bits.
+ *
+ * There are three protection bits available for SWAP entry:
+ * _PAGE_PRESENT
+ * _PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+ *
+ */
+
+#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_EXEC 0x00000004 /* H: Execute permission */
+#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
+#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
+#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
+#define _PAGE_GUARDED 0x00000100 /* H: G bit */
+#define _PAGE_COHERENT 0x00000200 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#if defined(CONFIG_SMP)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
new file mode 100644
index 000000000..93fb8e11a
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
+ * processors
+ *
+ MMU Assist Register 3:
+
+ 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
+
+ - PRESENT *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+
+*/
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
+#define _PAGE_EXEC 0x00010 /* H: SX permission */
+#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
+
+#define _PAGE_ENDIAN 0x00040 /* H: E bit */
+#define _PAGE_GUARDED 0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+#define _PAGE_SPECIAL 0x00800 /* S: Special page */
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
+
+#define _PTE_NONE_MASK 0
+
+#define PTE_WIMGE_SHIFT (6)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
new file mode 100644
index 000000000..0238e6bd0
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB has neither accessed nor write-
+ * protect bits. We assume that if the TLB gets loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added. We currently support two sizes, 4K and 8M.
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC. The 8M pages are only used for kernel
+ * mapping of well known areas. The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT 0x0001 /* V: Page is valid */
+#define _PAGE_NO_CACHE 0x0002 /* CI: cache inhibit */
+#define _PAGE_SH 0x0004 /* SH: No ASID (context) compare */
+#define _PAGE_SPS 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
+#define _PAGE_DIRTY 0x0100 /* C: page changed */
+
+/* These 4 software bits must be masked out when the L2 entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
+#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */
+#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
+#define _PAGE_SPECIAL 0x0080 /* SW entry */
+
+#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
+#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+
+#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+
+/* cache related flags non existing on 8xx */
+#define _PAGE_COHERENT 0
+#define _PAGE_WRITETHRU 0
+
+#define _PAGE_KERNEL_RO (_PAGE_SH | _PAGE_RO)
+#define _PAGE_KERNEL_ROX (_PAGE_SH | _PAGE_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_SH | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RWX (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
+
+#define _PMD_PRESENT 0x0001
+#define _PMD_PRESENT_MASK _PMD_PRESENT
+#define _PMD_BAD 0x0f90
+#define _PMD_PAGE_MASK 0x000c
+#define _PMD_PAGE_8M 0x000c
+#define _PMD_PAGE_512K 0x0004
+#define _PMD_ACCESSED 0x0020 /* APG 1 */
+#define _PMD_USER 0x0040 /* APG 2 */
+
+#define _PTE_NONE_MASK 0
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define _PAGE_PSIZE _PAGE_SPS
+#else
+#define _PAGE_PSIZE 0
+#endif
+
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE (_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
+#define PAGE_SHARED __pgprot(_PAGE_BASE)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RO);
+}
+
+#define pte_wrprotect pte_wrprotect
+
+static inline int pte_read(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
+static inline int pte_write(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_RO);
+}
+
+#define pte_write pte_write
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RO);
+}
+
+#define pte_mkwrite pte_mkwrite
+
+static inline bool pte_user(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_SH);
+}
+
+#define pte_user pte_user
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SH);
+}
+
+#define pte_mkprivileged pte_mkprivileged
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SH);
+}
+
+#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
+}
+
+#define pte_mkhuge pte_mkhuge
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge);
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
+}
+#define ptep_set_wrprotect ptep_set_wrprotect
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address, int psize)
+{
+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#define __ptep_set_access_flags __ptep_set_access_flags
+
+static inline unsigned long pgd_leaf_size(pgd_t pgd)
+{
+ if (pgd_val(pgd) & _PMD_PAGE_8M)
+ return SZ_8M;
+ return SZ_4M;
+}
+
+#define pgd_leaf_size pgd_leaf_size
+
+static inline unsigned long pte_leaf_size(pte_t pte)
+{
+ pte_basic_t val = pte_val(pte);
+
+ if (val & _PAGE_HUGE)
+ return SZ_512K;
+ if (val & _PAGE_SPS)
+ return SZ_16K;
+ return SZ_4K;
+}
+
+#define pte_leaf_size pte_leaf_size
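+
+/*
+ * Worked example of the decode above (values taken from the flags
+ * defined earlier in this file): a PTE with _PAGE_HUGE set maps 512K,
+ * one with only _PAGE_SPS set maps 16K, and anything else is a base
+ * 4K page, e.g.
+ *
+ *	pte_leaf_size(__pte(_PAGE_PRESENT | _PAGE_SPS)) == SZ_16K
+ */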
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
new file mode 100644
index 000000000..e50b211be
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PGALLOC_64_H
+#define _ASM_POWERPC_PGALLOC_64_H
+
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+struct vmemmap_backing {
+ struct vmemmap_backing *list;
+ unsigned long phys;
+ unsigned long virt_addr;
+};
+extern struct vmemmap_backing *vmemmap_list;
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ p4d_set(p4d, (unsigned long)pud);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, (unsigned long)pmd);
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ pmd_set(pmd, (unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
+{
+ pmd_set(pmd, (unsigned long)pte_page);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
+#define __pud_free_tlb(tlb, pud, addr) \
+ pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
+#endif /* _ASM_POWERPC_PGALLOC_64_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
new file mode 100644
index 000000000..10f5cf444
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/pgtable-nop4d.h>
+
+/*
+ * Entries per page directory level. The PTE level must use a 64b record
+ * for each page table entry. The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE 9
+#define PMD_INDEX_SIZE 7
+#define PUD_INDEX_SIZE 9
+#define PGD_INDEX_SIZE 9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
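+
+/*
+ * With the index sizes above and 4K pages, each PTE page maps
+ * 2^9 * 4K = 2M (PMD_SHIFT = 21), each PMD page maps 2^7 * 2M = 256M
+ * (PUD_SHIFT = 28), and each PUD page maps 2^9 * 256M = 128G
+ * (PGDIR_SHIFT = 37), for a total of 37 + 9 = 46 bits (64T) of
+ * effective address per page-table root.
+ */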
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0
+/* Bits to mask out from a P4D to get to the PUD page */
+#define P4D_MASKED_BITS 0
+
+
+/*
+ * 4-level page tables related bits
+ */
+
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (p4d_val(p4d) == 0)
+#define p4d_present(p4d) (p4d_val(p4d) != 0)
+
+#ifndef __ASSEMBLY__
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS);
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ *p4dp = __p4d(0);
+}
+
+static inline pte_t p4d_pte(p4d_t p4d)
+{
+ return __pte(p4d_val(p4d));
+}
+
+static inline p4d_t pte_p4d(pte_t pte)
+{
+ return __p4d(pte_val(pte));
+}
+extern struct page *p4d_page(p4d_t p4d);
+
+#endif /* !__ASSEMBLY__ */
+
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+/* On all 4K setups, remap_4k_pfn() equates to remap_pfn_range(). */
+#define remap_4k_pfn(vma, addr, pfn, prot) \
+ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
new file mode 100644
index 000000000..00a003d36
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 non-hashed page table.
+ */
+
+#include <linux/sizes.h>
+
+#include <asm/nohash/64/pgtable-4k.h>
+#include <asm/barrier.h>
+#include <asm/asm-const.h>
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+ PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
+
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+#define KERN_VIRT_START ASM_CONST(0xc000100000000000)
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define VMALLOC_START KERN_VIRT_START
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * The third quarter of the kernel virtual space is used for IO mappings,
+ * it is itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ * ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_IO_START + KERN_IO_SIZE
+ */
+#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define KERN_IO_SIZE (KERN_VIRT_SIZE >> 2)
+#define FULL_IO_SIZE 0x80000000ul
+#define ISA_IO_BASE (KERN_IO_START)
+#define ISA_IO_END (KERN_IO_START + 0x10000ul)
+#define PHB_IO_BASE (ISA_IO_END)
+#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_START (ioremap_bot)
+#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
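+
+/*
+ * Plugging the constants above in gives the following map:
+ *
+ *	VMALLOC_START = 0xc000100000000000
+ *	VMALLOC_END   = 0xc000140000000000 (a quarter of the region)
+ *	KERN_IO_START = 0xc000180000000000 (= ISA_IO_BASE)
+ *	IOREMAP_BASE  = 0xc000180080000000 (ISA_IO_BASE + 2G)
+ */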
+
+/*
+ * Defines the address of the vmemmap area, in its own region
+ * after the vmalloc space on Book3E
+ */
+#define VMEMMAP_BASE VMALLOC_END
+#define VMEMMAP_END KERN_IO_START
+#define vmemmap ((struct page *)VMEMMAP_BASE)
+
+
+/*
+ * Include the PTE bits definitions
+ */
+#include <asm/nohash/pte-e500.h>
+
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+#define H_PAGE_4K_PFN 0
+
+#ifndef __ASSEMBLY__
+/* pte_clear moved to later in this file */
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+ *pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ *pmdp = __pmd(0);
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
+ || (pmd_val(pmd) & PMD_BAD_BITS))
+#define pmd_present(pmd) (!pmd_none(pmd))
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
+extern struct page *pmd_page(pmd_t pmd);
+#define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd)))
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+ *pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ *pudp = __pud(0);
+}
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
+ || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_present(pud) (pud_val(pud) != 0)
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS);
+}
+
+extern struct page *pud_page(pud_t pud);
+
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+#define pud_write(pud) pte_write(pud_pte(pud))
+#define p4d_write(p4d)	pte_write(p4d_pte(p4d))
+
+static inline void p4d_set(p4d_t *p4dp, unsigned long val)
+{
+ *p4dp = __p4d(val);
+}
+
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ unsigned long set,
+ int huge)
+{
+ unsigned long old = pte_val(*ptep);
+ *ptep = __pte((old & ~clr) | set);
+
+ /* huge pages use the old page table lock */
+ if (!huge)
+ assert_pte_locked(mm, addr);
+
+ return old;
+}
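+
+/*
+ * As an illustration (not a call site from this file), clearing the
+ * dirty bit of a regular kernel PTE while leaving all other bits
+ * intact would be:
+ *
+ *	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);
+ *
+ * with the previous PTE value returned so the caller can check which
+ * bits were set before the update.
+ */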
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if (!pte_young(*ptep))
+ return 0;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+({ \
+ int __r; \
+ __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+ __r; \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+ __ptep); \
+ __young; \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+ return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t * ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ unsigned long bits = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+
+ unsigned long old = pte_val(*ptep);
+ *ptep = __pte(old | bits);
+
+ flush_tlb_page(vma, address);
+}
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+ } while (0)
+
+#define SWP_TYPE_BITS 5
+#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+ & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << _PAGE_BIT_SWAP_TYPE) \
+ | ((offset) << PTE_RPN_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x) __pte((x).val)
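+
+/*
+ * Worked example of the encoding above: _PAGE_BIT_SWAP_TYPE is 2 and
+ * PTE_RPN_SHIFT is 24 (see pte-e500.h), so __swp_entry(3, 0x10) puts
+ * the type in bits 2-6 and the offset from bit 24 up; the two fields
+ * never overlap and __swp_type()/__swp_offset() recover them exactly.
+ */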
+
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
+extern int __meminit vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys);
+extern void vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size);
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
new file mode 100644
index 000000000..8f04ad20e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ if (WARN_ON(!hugepd_ok(hpd)))
+ return NULL;
+
+ return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ /*
+ * On FSL BookE, we have multiple higher-level table entries that
+ * point to the same hugepte. Just use the first one since they're all
+ * identical. So for that case, idx=0.
+ */
+ return hugepd_page(hpd);
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ /* We use the old format for PPC_E500 */
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
+}
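+
+/*
+ * The encoding round-trips through the accessors above. For a
+ * hypothetical 4M hugepage directory backed by a kernel-address PTE
+ * page:
+ *
+ *	hugepd_populate(&hpd, ptep, 22);
+ *	// hugepd_shift(hpd) == 22, hugepd_page(hpd) == ptep
+ */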
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ if (shift & 1) /* Not a power of 4 */
+ return -EINVAL;
+
+ return shift_to_mmu_psize(shift);
+}
+
+#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
new file mode 100644
index 000000000..49bb41ed0
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_BOOKE_H_
+#define _ASM_POWERPC_KUP_BOOKE_H_
+
+#include <asm/bug.h>
+
+#ifdef CONFIG_PPC_KUAP
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_check_amr gpr1, gpr2
+.endm
+
+#else
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+
+#include <asm/reg.h>
+
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void __kuap_lock(void)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_PID);
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (regs->kuap)
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = mfspr(SPRN_PID);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap);
+
+ return kuap;
+}
+
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+}
+
+static inline void __prevent_user_access(unsigned long dir)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline unsigned long __prevent_user_access_return(void)
+{
+ unsigned long flags = mfspr(SPRN_PID);
+
+ mtspr(SPRN_PID, 0);
+ isync();
+
+ return flags;
+}
+
+static inline void __restore_user_access(unsigned long flags)
+{
+ if (flags) {
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+ }
+}
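+
+/*
+ * A sketch of the intended pairing (illustration only; the real call
+ * sites live in the generic kup/uaccess code):
+ *
+ *	__allow_user_access(to, from, size, dir);
+ *	... user copy runs with the thread's PID in SPRN_PID ...
+ *	__prevent_user_access(dir);
+ *
+ * so user translations are only live for the duration of the access.
+ */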
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return !regs->kuap;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* _ASM_POWERPC_KUP_BOOKE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
new file mode 100644
index 000000000..e43a418d3
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_BOOK3E_H_
+#define _ASM_POWERPC_MMU_BOOK3E_H_
+/*
+ * Freescale Book-E/Book-3e (ISA 2.06+) MMU support
+ */
+
+/* Book-3e defined page sizes */
+#define BOOK3E_PAGESZ_1K 0
+#define BOOK3E_PAGESZ_2K 1
+#define BOOK3E_PAGESZ_4K 2
+#define BOOK3E_PAGESZ_8K 3
+#define BOOK3E_PAGESZ_16K 4
+#define BOOK3E_PAGESZ_32K 5
+#define BOOK3E_PAGESZ_64K 6
+#define BOOK3E_PAGESZ_128K 7
+#define BOOK3E_PAGESZ_256K 8
+#define BOOK3E_PAGESZ_512K 9
+#define BOOK3E_PAGESZ_1M 10
+#define BOOK3E_PAGESZ_2M 11
+#define BOOK3E_PAGESZ_4M 12
+#define BOOK3E_PAGESZ_8M 13
+#define BOOK3E_PAGESZ_16M 14
+#define BOOK3E_PAGESZ_32M 15
+#define BOOK3E_PAGESZ_64M 16
+#define BOOK3E_PAGESZ_128M 17
+#define BOOK3E_PAGESZ_256M 18
+#define BOOK3E_PAGESZ_512M 19
+#define BOOK3E_PAGESZ_1GB 20
+#define BOOK3E_PAGESZ_2GB 21
+#define BOOK3E_PAGESZ_4GB 22
+#define BOOK3E_PAGESZ_8GB 23
+#define BOOK3E_PAGESZ_16GB 24
+#define BOOK3E_PAGESZ_32GB 25
+#define BOOK3E_PAGESZ_64GB 26
+#define BOOK3E_PAGESZ_128GB 27
+#define BOOK3E_PAGESZ_256GB 28
+#define BOOK3E_PAGESZ_512GB 29
+#define BOOK3E_PAGESZ_1TB 30
+#define BOOK3E_PAGESZ_2TB 31
+
+/* MAS registers bit definitions */
+
+#define MAS0_TLBSEL_MASK 0x30000000
+#define MAS0_TLBSEL_SHIFT 28
+#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \
+ MAS0_TLBSEL_SHIFT)
+#define MAS0_ESEL_MASK 0x0FFF0000
+#define MAS0_ESEL_SHIFT 16
+#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
+#define MAS0_NV(x) ((x) & 0x00000FFF)
+#define MAS0_HES 0x00004000
+#define MAS0_WQ_ALLWAYS 0x00000000
+#define MAS0_WQ_COND 0x00001000
+#define MAS0_WQ_CLR_RSRV 0x00002000
+
+#define MAS1_VALID 0x80000000
+#define MAS1_IPROT 0x40000000
+#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
+#define MAS1_IND 0x00002000
+#define MAS1_TS 0x00001000
+#define MAS1_TSIZE_MASK 0x00000f80
+#define MAS1_TSIZE_SHIFT 7
+#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
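+
+/*
+ * For example, a valid TLB entry for PID 5 mapping a 16M page would
+ * compose MAS1 as (illustrative only):
+ *
+ *	mas1 = MAS1_VALID | MAS1_TID(5) | MAS1_TSIZE(BOOK3E_PAGESZ_16M);
+ *
+ * and MAS1_GET_TSIZE(mas1) recovers BOOK3E_PAGESZ_16M.
+ */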
+
+#define MAS2_EPN (~0xFFFUL)
+#define MAS2_X0 0x00000040
+#define MAS2_X1 0x00000020
+#define MAS2_W 0x00000010
+#define MAS2_I 0x00000008
+#define MAS2_M 0x00000004
+#define MAS2_G 0x00000002
+#define MAS2_E 0x00000001
+#define MAS2_WIMGE_MASK 0x0000001f
+#define MAS2_EPN_MASK(size) (~0 << (size + 10))
+
+#define MAS3_RPN 0xFFFFF000
+#define MAS3_U0 0x00000200
+#define MAS3_U1 0x00000100
+#define MAS3_U2 0x00000080
+#define MAS3_U3 0x00000040
+#define MAS3_UX 0x00000020
+#define MAS3_SX 0x00000010
+#define MAS3_UW 0x00000008
+#define MAS3_SW 0x00000004
+#define MAS3_UR 0x00000002
+#define MAS3_SR 0x00000001
+#define MAS3_BAP_MASK 0x0000003f
+#define MAS3_SPSIZE 0x0000003e
+#define MAS3_SPSIZE_SHIFT 1
+
+#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
+#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
+#define MAS4_INDD 0x00008000 /* Default IND */
+#define MAS4_TSIZED(x) MAS1_TSIZE(x)
+#define MAS4_X0D 0x00000040
+#define MAS4_X1D 0x00000020
+#define MAS4_WD 0x00000010
+#define MAS4_ID 0x00000008
+#define MAS4_MD 0x00000004
+#define MAS4_GD 0x00000002
+#define MAS4_ED 0x00000001
+#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */
+#define MAS4_WIMGED_SHIFT 0
+#define MAS4_VLED MAS4_X1D /* Default VLE */
+#define MAS4_ACMD 0x000000c0 /* Default ACM */
+#define MAS4_ACMD_SHIFT 6
+#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
+#define MAS4_TSIZED_SHIFT 7
+
+#define MAS5_SGS 0x80000000
+
+#define MAS6_SPID0 0x3FFF0000
+#define MAS6_SPID1 0x00007FFE
+#define MAS6_ISIZE(x) MAS1_TSIZE(x)
+#define MAS6_SAS 0x00000001
+#define MAS6_SPID MAS6_SPID0
+#define MAS6_SIND 0x00000002 /* Indirect page */
+#define MAS6_SIND_SHIFT 1
+#define MAS6_SPID_MASK 0x3fff0000
+#define MAS6_SPID_SHIFT 16
+#define MAS6_ISIZE_MASK 0x00000f80
+#define MAS6_ISIZE_SHIFT 7
+
+#define MAS7_RPN 0xFFFFFFFF
+
+#define MAS8_TGS 0x80000000 /* Guest space */
+#define MAS8_VF 0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID 0x000000ff
+
+/* Bit definitions for MMUCFG */
+#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
+#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
+#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */
+#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */
+#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */
+#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */
+#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */
+#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */
+#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */
+#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */
+#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
+#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+
+/* MMUCFG bits */
+#define MMUCFG_MAVN_NASK 0x00000003
+#define MMUCFG_MAVN_V1_0 0x00000000
+#define MMUCFG_MAVN_V2_0 0x00000001
+#define MMUCFG_NTLB_MASK 0x0000000c
+#define MMUCFG_NTLB_SHIFT 2
+#define MMUCFG_PIDSIZE_MASK 0x000007c0
+#define MMUCFG_PIDSIZE_SHIFT 6
+#define MMUCFG_TWC 0x00008000
+#define MMUCFG_LRAT 0x00010000
+#define MMUCFG_RASIZE_MASK 0x00fe0000
+#define MMUCFG_RASIZE_SHIFT 17
+#define MMUCFG_LPIDSIZE_MASK 0x0f000000
+#define MMUCFG_LPIDSIZE_SHIFT 24
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
+#define TLBnCFG_HES 0x00002000 /* HW select supported */
+#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */
+#define TLBnCFG_GTWE 0x00010000 /* Guest can write */
+#define TLBnCFG_IND 0x00020000 /* IND entries supported */
+#define TLBnCFG_PT 0x00040000 /* Can load from page table */
+#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */
+#define TLBnCFG_MINSIZE_SHIFT 20
+#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
+#define TLBnCFG_MAXSIZE_SHIFT 16
+#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+#define TLBnCFG_ASSOC_SHIFT 24
+
+/* TLBnPS encoding */
+#define TLBnPS_4K 0x00000004
+#define TLBnPS_8K 0x00000008
+#define TLBnPS_16K 0x00000010
+#define TLBnPS_32K 0x00000020
+#define TLBnPS_64K 0x00000040
+#define TLBnPS_128K 0x00000080
+#define TLBnPS_256K 0x00000100
+#define TLBnPS_512K 0x00000200
+#define TLBnPS_1M 0x00000400
+#define TLBnPS_2M 0x00000800
+#define TLBnPS_4M 0x00001000
+#define TLBnPS_8M 0x00002000
+#define TLBnPS_16M 0x00004000
+#define TLBnPS_32M 0x00008000
+#define TLBnPS_64M 0x00010000
+#define TLBnPS_128M 0x00020000
+#define TLBnPS_256M 0x00040000
+#define TLBnPS_512M 0x00080000
+#define TLBnPS_1G 0x00100000
+#define TLBnPS_2G 0x00200000
+#define TLBnPS_4G 0x00400000
+#define TLBnPS_8G 0x00800000
+#define TLBnPS_16G 0x01000000
+#define TLBnPS_32G 0x02000000
+#define TLBnPS_64G 0x04000000
+#define TLBnPS_128G 0x08000000
+#define TLBnPS_256G 0x10000000
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL 0
+#define TLBILX_T_TID 1
+#define TLBILX_T_FULLMATCH 3
+#define TLBILX_T_CLASS0 4
+#define TLBILX_T_CLASS1 5
+#define TLBILX_T_CLASS2 6
+#define TLBILX_T_CLASS3 7
+
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define MAS2_M_IF_NEEDED MAS2_M
+#else
+#define MAS2_M_IF_NEEDED 0
+#endif
+
+#ifndef __ASSEMBLY__
+#include <asm/bug.h>
+
+extern unsigned int tlbcam_index;
+
+typedef struct {
+ unsigned int id;
+ unsigned int active;
+ void __user *vdso;
+} mm_context_t;
+
+/* Page size definitions, common between 32-bit and 64-bit
+ *
+ * shift : the "PAGE_SHIFT" value for that page size
+ * enc   : the PTE encoding mask (the 'enc' struct field below)
+ */
+struct mmu_psize_def {
+ unsigned int shift; /* number of bits */
+ unsigned int enc; /* PTE encoding */
+ unsigned int ind; /* Corresponding indirect page size shift */
+ unsigned int flags;
+#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
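+
+/*
+ * e.g. shift_to_mmu_psize(12) yields MMU_PAGE_4K on any configuration
+ * that supports 4K pages, and -1 when no entry of mmu_psize_defs[]
+ * matches; mmu_psize_to_shift() below is its inverse.
+ */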
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
+/* The page sizes use the same names as the 64-bit hash MMU, but are
+ * compile-time constants here.
+ */
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_4K
+#else
+#error Unsupported page size
+#endif
+
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
+
+struct tlb_core_data {
+ /*
+ * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+ * Must be the first struct element.
+ */
+ u8 lock;
+
+ /* For software way selection, as on Freescale TLB1 */
+ u8 esel_next, esel_max, esel_first;
+};
+
+#ifdef CONFIG_PPC64
+extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE 0
+#define PPC_HTW_IBM 1
+#define PPC_HTW_E6500 2
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
+
+#define mmu_cleanup_all NULL
+
+#define MAX_PHYSMEM_BITS 44
+
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h
new file mode 100644
index 000000000..e264be219
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/mmu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_MMU_H_
+#define _ASM_POWERPC_NOHASH_MMU_H_
+
+#if defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+#include <asm/nohash/32/mmu-40x.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+#include <asm/nohash/32/mmu-44x.h>
+#elif defined(CONFIG_PPC_E500)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#include <asm/nohash/mmu-e500.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+#include <asm/nohash/32/mmu-8xx.h>
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
new file mode 100644
index 000000000..4b6237631
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PGALLOC_H
+#define _ASM_POWERPC_NOHASH_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#ifdef CONFIG_PPC64
+extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
+#else
+/* 44x etc, which are BookE but not Book3E */
+static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
+ unsigned long address)
+{
+}
+#endif /* CONFIG_PPC64 */
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+#ifdef CONFIG_PPC64
+#include <asm/nohash/64/pgalloc.h>
+#else
+#include <asm/nohash/32/pgalloc.h>
+#endif
+
+static inline void pgtable_free(void *table, int shift)
+{
+ if (!shift) {
+ pte_fragment_free((unsigned long *)table, 0);
+ } else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+
+#define get_hugepd_cache_index(x) (x)
+
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
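+
+/*
+ * This works because page-table pages are aligned far beyond
+ * MAX_PGTABLE_INDEX_SIZE, so the shift can ride in the low bits of
+ * the pointer. For example (illustrative only):
+ *
+ *	pgtable_free_tlb(tlb, table, 7);
+ *	// __tlb_remove_table() later recovers shift 7 by masking with
+ *	// MAX_PGTABLE_INDEX_SIZE and frees via PGT_CACHE(7)
+ */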
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
new file mode 100644
index 000000000..3d7dce908
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_PGTABLE_H
+
+#if defined(CONFIG_PPC64)
+#include <asm/nohash/64/pgtable.h>
+#else
+#include <asm/nohash/32/pgtable.h>
+#endif
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+#ifndef __ASSEMBLY__
+
+/* Generic accessors to PTE bits */
+#ifndef pte_write
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_RW;
+}
+#endif
+#ifndef pte_read
+static inline int pte_read(pte_t pte) { return 1; }
+#endif
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline bool pte_hashpte(pte_t pte) { return false; }
+static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/linux/pgtable.h. On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return pte_present(pte) && !pte_user(pte);
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
+ */
+#ifndef pte_user
+static inline bool pte_user(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+}
+#endif
+
+/*
+ * We only find the page table entry at the last level,
+ * hence there is no need for other accessors.
+ */
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ /*
+	 * A read-only access is controlled by the _PAGE_USER bit.
+	 * We have _PAGE_READ set for WRITE and EXECUTE.
+ */
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ return false;
+
+ if (write && !pte_write(pte))
+ return false;
+
+ return true;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+#ifndef pte_mkhuge
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte));
+}
+#endif
+
+#ifndef pte_mkprivileged
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+#endif
+
+#ifndef pte_mkuser
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_USER);
+}
+#endif
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Insert a PTE: the top-level function is out of line. It uses an inline
+ * low-level function in the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte);
+
+/* This low-level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now, but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+	/* The tricky case is 32-bit with a 64-bit PTE. There we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between.
+	 * In the percpu case, we also fall back to the simple update.
+	 */
+ */
+ if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
+ __asm__ __volatile__("\
+ stw%X0 %2,%0\n\
+ mbar\n\
+ stw%X1 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
+ return;
+ }
+ /* Anything else just stores the PTE normally. That covers all 64-bit
+ * cases, and 32-bit non-hash with 32-bit PTEs.
+ */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+ ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
+#else
+ *ptep = pte;
+#endif
+
+ /*
+ * With hardware tablewalk, a sync is needed to ensure that
+ * subsequent accesses see the PTE we just wrote. Unlike userspace
+ * mappings, we can't tolerate spurious faults, so make sure
+ * the new PTE will be seen the first time.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
+ mb();
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT))
+
+#if _PAGE_WRITETHRU != 0
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
+#endif
+
+#define pgprot_cached_noncoherent(prot) \
+ (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
+#define pgprot_writecombine pgprot_noncached_wc
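+
+/*
+ * Typical use derives a device-mapping protection from a normal one,
+ * e.g. (illustrative only):
+ *
+ *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+ *
+ * which clears all of _PAGE_CACHE_CTL, then sets cache-inhibited and
+ * guarded as required for MMIO.
+ */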
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hugepd_ok(hugepd_t hpd)
+{
+#ifdef CONFIG_PPC_8xx
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
+#else
+ /* We clear the top bit to indicate hugepd */
+ return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
+#endif
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+#define is_hugepd(hpd) (hugepd_ok(hpd))
+#endif
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
new file mode 100644
index 000000000..0934e8965
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H
+#define _ASM_POWERPC_NOHASH_PTE_E500_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant to the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
+#define _PAGE_SW1 0x000002
+#define _PAGE_BIT_SWAP_TYPE 2
+#define _PAGE_BAP_SR 0x000004
+#define _PAGE_BAP_UR 0x000008
+#define _PAGE_BAP_SW 0x000010
+#define _PAGE_BAP_UW 0x000020
+#define _PAGE_BAP_SX 0x000040
+#define _PAGE_BAP_UX 0x000080
+#define _PAGE_PSIZE_MSK 0x000f00
+#define _PAGE_PSIZE_4K 0x000200
+#define _PAGE_PSIZE_8K 0x000300
+#define _PAGE_PSIZE_16K 0x000400
+#define _PAGE_PSIZE_32K 0x000500
+#define _PAGE_PSIZE_64K 0x000600
+#define _PAGE_PSIZE_128K 0x000700
+#define _PAGE_PSIZE_256K 0x000800
+#define _PAGE_PSIZE_512K 0x000900
+#define _PAGE_PSIZE_1M 0x000a00
+#define _PAGE_PSIZE_2M 0x000b00
+#define _PAGE_PSIZE_4M 0x000c00
+#define _PAGE_PSIZE_8M 0x000d00
+#define _PAGE_PSIZE_16M 0x000e00
+#define _PAGE_PSIZE_32M 0x000f00
+#define _PAGE_DIRTY 0x001000 /* C: page changed */
+#define _PAGE_SW0 0x002000
+#define _PAGE_U3 0x004000
+#define _PAGE_U2 0x008000
+#define _PAGE_U1 0x010000
+#define _PAGE_U0 0x020000
+#define _PAGE_ACCESSED 0x040000
+#define _PAGE_ENDIAN 0x080000
+#define _PAGE_GUARDED 0x100000
+#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
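+
+/*
+ * Note that the _PAGE_PSIZE_* field is the matching BOOK3E_PAGESZ_*
+ * value shifted left by 8, e.g. _PAGE_PSIZE_4K = BOOK3E_PAGESZ_4K << 8
+ * = 0x200 and _PAGE_PSIZE_16M = BOOK3E_PAGESZ_16M << 8 = 0xe00.
+ */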
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
+#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
+#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
+#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
+#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)
+
+#define _PAGE_SPECIAL _PAGE_SW0
+
+/* Base page size */
+#define _PAGE_PSIZE _PAGE_PSIZE_4K
+#define PTE_RPN_SHIFT (24)
+
+#define PTE_WIMGE_SHIFT (19)
+#define PTE_BAP_SHIFT (2)
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
+#else
+#define _PTE_NONE_MASK 0
+#endif
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (i.e.
+ * cacheable kernel and user pages) and one for non-cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled, as
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
+}
+
+#define pte_mkprivileged pte_mkprivileged
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
+}
+
+#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ if (pte_val(pte) & _PAGE_BAP_UR)
+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
+ else
+ return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+}
+#define pte_mkexec pte_mkexec
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
new file mode 100644
index 000000000..bdaf34ad4
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H
+#define _ASM_POWERPC_NOHASH_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_mm(mm) flushes the specified mm context on
+ *   the local processor
+ * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+struct vm_area_struct;
+struct mm_struct;
+
+#define MMU_NO_CONTEXT ((unsigned int)-1)
+
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+
+#ifdef CONFIG_PPC_8xx
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid = READ_ONCE(mm->context.id);
+
+ if (pid != MMU_NO_CONTEXT)
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ start &= PAGE_MASK;
+
+ if (end - start <= PAGE_SIZE)
+ asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+ else
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+#endif
+
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
+#define __flush_tlb_page(mm, addr, p, i)	__local_flush_tlb_page(mm, addr, p, i)
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
new file mode 100644
index 000000000..eda7fac35
--- /dev/null
+++ b/arch/powerpc/include/asm/nvram.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * NVRAM definitions and access functions.
+ */
+#ifndef _ASM_POWERPC_NVRAM_H
+#define _ASM_POWERPC_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <uapi/asm/nvram.h>
+
+/*
+ * Set the oops header version to distinguish between old- and new-format
+ * headers. The lnx,oops-log partition's maximum size is 4000, so a header
+ * version > 4000 identifies the new header.
+ */
+#define OOPS_HDR_VERSION 5000
+
+struct err_log_info {
+ __be32 error_type;
+ __be32 seq_num;
+};
+
+struct nvram_os_partition {
+ const char *name;
+ int req_size; /* desired size, in bytes */
+ int min_size; /* minimum acceptable size (0 means req_size) */
+ long size; /* size of data portion (excluding err_log_info) */
+ long index; /* offset of data portion of partition */
+ bool os_partition; /* partition initialized by OS, not FW */
+};
+
+struct oops_log_info {
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
+} __attribute__((packed));
+
+extern struct nvram_os_partition oops_log_partition;
+
+#ifdef CONFIG_PPC_PSERIES
+extern struct nvram_os_partition rtas_log_partition;
+
+extern int nvram_write_error_log(char * buff, int length,
+ unsigned int err_type, unsigned int err_seq);
+extern int nvram_read_error_log(char * buff, int length,
+ unsigned int * err_type, unsigned int *err_seq);
+extern int nvram_clear_error_log(void);
+extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef CONFIG_MMIO_NVRAM
+extern int mmio_nvram_init(void);
+#else
+static inline int mmio_nvram_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+ int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig,
+ const char *exceptions[]);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
+/* Return partition offset in nvram */
+extern int pmac_get_partition(int partition);
+
+/* Direct access to XPRAM on PowerMacs */
+extern u8 pmac_xpram_read(int xpaddr);
+extern void pmac_xpram_write(int xpaddr, u8 data);
+
+/* Initialize NVRAM OS partition */
+extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
+
+/* Initialize NVRAM oops partition */
+extern void __init nvram_init_oops_partition(int rtas_partition_exists);
+
+/* Read a NVRAM partition */
+extern int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt);
+
+/* Write to NVRAM OS partition */
+extern int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt);
+
+#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/include/asm/ohare.h b/arch/powerpc/include/asm/ohare.h
new file mode 100644
index 000000000..da3371fc3
--- /dev/null
+++ b/arch/powerpc/include/asm/ohare.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_OHARE_H
+#define _ASM_POWERPC_OHARE_H
+#ifdef __KERNEL__
+/*
+ * ohare.h: definitions for using the "O'Hare" I/O controller chip.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ *
+ * BenH: Changed to match those of heathrow (but not all of them). Please
+ * check if I didn't break anything (especially the media bay).
+ */
+
+/* offset from ohare base for feature control register */
+#define OHARE_MBCR 0x34
+#define OHARE_FCR 0x38
+
+/*
+ * Bits in feature control register.
+ * These were mostly derived by experiment on a PowerBook 3400
+ * and may differ for other machines.
+ */
+#define OH_SCC_RESET 1
+#define OH_BAY_POWER_N 2 /* a guess */
+#define OH_BAY_PCI_ENABLE 4 /* a guess */
+#define OH_BAY_IDE_ENABLE 8
+#define OH_BAY_FLOPPY_ENABLE 0x10
+#define OH_IDE0_ENABLE 0x20
+#define OH_IDE0_RESET_N 0x40 /* a guess */
+#define OH_BAY_DEV_MASK 0x1c
+#define OH_BAY_RESET_N 0x80
+#define OH_IOBUS_ENABLE 0x100 /* IOBUS seems to be IDE */
+#define OH_SCC_ENABLE 0x200
+#define OH_MESH_ENABLE 0x400
+#define OH_FLOPPY_ENABLE 0x800
+#define OH_SCCA_IO 0x4000
+#define OH_SCCB_IO 0x8000
+#define OH_VIA_ENABLE 0x10000 /* Is apparently wrong, to be verified */
+#define OH_IDE1_RESET_N 0x800000
+
+/*
+ * Bits to set in the feature control register on PowerBooks.
+ */
+#define PBOOK_FEATURES (OH_IDE0_ENABLE | OH_SCC_ENABLE | \
+ OH_MESH_ENABLE | OH_SCCA_IO | OH_SCCB_IO)
+
+/*
+ * A magic value to put into the feature control register of the
+ * "ohare" I/O controller on Starmaxes to enable the IDE CD interface.
+ * Contributed by Harry Eaton.
+ */
+#define STARMAX_FEATURES 0xbeff7a
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OHARE_H */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
new file mode 100644
index 000000000..a2bc4b95e
--- /dev/null
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -0,0 +1,1188 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OPAL API definitions.
+ *
+ * Copyright 2011-2015 IBM Corp.
+ */
+
+#ifndef __OPAL_API_H
+#define __OPAL_API_H
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS 0
+#define OPAL_PARAMETER -1
+#define OPAL_BUSY -2
+#define OPAL_PARTIAL -3
+#define OPAL_CONSTRAINED -4
+#define OPAL_CLOSED -5
+#define OPAL_HARDWARE -6
+#define OPAL_UNSUPPORTED -7
+#define OPAL_PERMISSION -8
+#define OPAL_NO_MEM -9
+#define OPAL_RESOURCE -10
+#define OPAL_INTERNAL_ERROR -11
+#define OPAL_BUSY_EVENT -12
+#define OPAL_HARDWARE_FROZEN -13
+#define OPAL_WRONG_STATE -14
+#define OPAL_ASYNC_COMPLETION -15
+#define OPAL_EMPTY -16
+#define OPAL_I2C_TIMEOUT -17
+#define OPAL_I2C_INVALID_CMD -18
+#define OPAL_I2C_LBUS_PARITY -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS -21
+#define OPAL_I2C_ARBT_LOST -22
+#define OPAL_I2C_NACK_RCVD -23
+#define OPAL_I2C_STOP_ERR -24
+#define OPAL_XIVE_PROVISIONING -31
+#define OPAL_XIVE_FREE_ACTIVE -32
+#define OPAL_TIMEOUT -33
+
+/* API Tokens (in r0) */
+#define OPAL_INVALID_CALL -1
+#define OPAL_TEST 0
+#define OPAL_CONSOLE_WRITE 1
+#define OPAL_CONSOLE_READ 2
+#define OPAL_RTC_READ 3
+#define OPAL_RTC_WRITE 4
+#define OPAL_CEC_POWER_DOWN 5
+#define OPAL_CEC_REBOOT 6
+#define OPAL_READ_NVRAM 7
+#define OPAL_WRITE_NVRAM 8
+#define OPAL_HANDLE_INTERRUPT 9
+#define OPAL_POLL_EVENTS 10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
+#define OPAL_PCI_CONFIG_READ_BYTE 13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
+#define OPAL_PCI_CONFIG_READ_WORD 15
+#define OPAL_PCI_CONFIG_WRITE_BYTE 16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
+#define OPAL_PCI_CONFIG_WRITE_WORD 18
+#define OPAL_SET_XIVE 19
+#define OPAL_GET_XIVE 20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
+#define OPAL_PCI_EEH_FREEZE_STATUS 23
+#define OPAL_PCI_SHPC 24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
+#define OPAL_PCI_EEH_FREEZE_CLEAR 26
+#define OPAL_PCI_PHB_MMIO_ENABLE 27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
+#define OPAL_PCI_SET_PE 31
+#define OPAL_PCI_SET_PELTV 32
+#define OPAL_PCI_SET_MVE 33
+#define OPAL_PCI_SET_MVE_ENABLE 34
+#define OPAL_PCI_GET_XIVE_REISSUE 35
+#define OPAL_PCI_SET_XIVE_REISSUE 36
+#define OPAL_PCI_SET_XIVE_PE 37
+#define OPAL_GET_XIVE_SOURCE 38
+#define OPAL_GET_MSI_32 39
+#define OPAL_GET_MSI_64 40
+#define OPAL_START_CPU 41
+#define OPAL_QUERY_CPU_STATUS 42
+#define OPAL_WRITE_OPPANEL 43 /* unimplemented */
+#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
+#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_RESERVED1 58
+#define OPAL_RESERVED2 59
+#define OPAL_PCI_NEXT_ERROR 60
+#define OPAL_PCI_EEH_FREEZE_STATUS2 61
+#define OPAL_PCI_POLL 62
+#define OPAL_PCI_MSI_EOI 63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
+#define OPAL_REINIT_CPUS 70
+#define OPAL_ELOG_READ 71
+#define OPAL_ELOG_WRITE 72
+#define OPAL_ELOG_ACK 73
+#define OPAL_ELOG_RESEND 74
+#define OPAL_ELOG_SIZE 75
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
+#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_CHECK_TOKEN 80
+#define OPAL_DUMP_INIT 81
+#define OPAL_DUMP_INFO 82
+#define OPAL_DUMP_READ 83
+#define OPAL_DUMP_ACK 84
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
+#define OPAL_SENSOR_READ 88
+#define OPAL_GET_PARAM 89
+#define OPAL_SET_PARAM 90
+#define OPAL_DUMP_RESEND 91
+#define OPAL_ELOG_SEND 92 /* Deprecated */
+#define OPAL_PCI_SET_PHB_CAPI_MODE 93
+#define OPAL_DUMP_INFO2 94
+#define OPAL_WRITE_OPPANEL_ASYNC 95
+#define OPAL_PCI_ERR_INJECT 96
+#define OPAL_PCI_EEH_FREEZE_SET 97
+#define OPAL_HANDLE_HMI 98
+#define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
+#define OPAL_REGISTER_DUMP_REGION 101
+#define OPAL_UNREGISTER_DUMP_REGION 102
+#define OPAL_WRITE_TPO 103
+#define OPAL_READ_TPO 104
+#define OPAL_GET_DPO_STATUS 105
+#define OPAL_OLD_I2C_REQUEST 106 /* Deprecated */
+#define OPAL_IPMI_SEND 107
+#define OPAL_IPMI_RECV 108
+#define OPAL_I2C_REQUEST 109
+#define OPAL_FLASH_READ 110
+#define OPAL_FLASH_WRITE 111
+#define OPAL_FLASH_ERASE 112
+#define OPAL_PRD_MSG 113
+#define OPAL_LEDS_GET_INDICATOR 114
+#define OPAL_LEDS_SET_INDICATOR 115
+#define OPAL_CEC_REBOOT2 116
+#define OPAL_CONSOLE_FLUSH 117
+#define OPAL_GET_DEVICE_TREE 118
+#define OPAL_PCI_GET_PRESENCE_STATE 119
+#define OPAL_PCI_GET_POWER_STATE 120
+#define OPAL_PCI_SET_POWER_STATE 121
+#define OPAL_INT_GET_XIRR 122
+#define OPAL_INT_SET_CPPR 123
+#define OPAL_INT_EOI 124
+#define OPAL_INT_SET_MFRR 125
+#define OPAL_PCI_TCE_KILL 126
+#define OPAL_NMMU_SET_PTCR 127
+#define OPAL_XIVE_RESET 128
+#define OPAL_XIVE_GET_IRQ_INFO 129
+#define OPAL_XIVE_GET_IRQ_CONFIG 130
+#define OPAL_XIVE_SET_IRQ_CONFIG 131
+#define OPAL_XIVE_GET_QUEUE_INFO 132
+#define OPAL_XIVE_SET_QUEUE_INFO 133
+#define OPAL_XIVE_DONATE_PAGE 134
+#define OPAL_XIVE_ALLOCATE_VP_BLOCK 135
+#define OPAL_XIVE_FREE_VP_BLOCK 136
+#define OPAL_XIVE_GET_VP_INFO 137
+#define OPAL_XIVE_SET_VP_INFO 138
+#define OPAL_XIVE_ALLOCATE_IRQ 139
+#define OPAL_XIVE_FREE_IRQ 140
+#define OPAL_XIVE_SYNC 141
+#define OPAL_XIVE_DUMP 142
+#define OPAL_XIVE_GET_QUEUE_STATE 143
+#define OPAL_XIVE_SET_QUEUE_STATE 144
+#define OPAL_SIGNAL_SYSTEM_RESET 145
+#define OPAL_NPU_INIT_CONTEXT 146
+#define OPAL_NPU_DESTROY_CONTEXT 147
+#define OPAL_NPU_MAP_LPAR 148
+#define OPAL_IMC_COUNTERS_INIT 149
+#define OPAL_IMC_COUNTERS_START 150
+#define OPAL_IMC_COUNTERS_STOP 151
+#define OPAL_GET_POWERCAP 152
+#define OPAL_SET_POWERCAP 153
+#define OPAL_GET_POWER_SHIFT_RATIO 154
+#define OPAL_SET_POWER_SHIFT_RATIO 155
+#define OPAL_SENSOR_GROUP_CLEAR 156
+#define OPAL_PCI_SET_P2P 157
+#define OPAL_QUIESCE 158
+#define OPAL_NPU_SPA_SETUP 159
+#define OPAL_NPU_SPA_CLEAR_CACHE 160
+#define OPAL_NPU_TL_SET 161
+#define OPAL_SENSOR_READ_U64 162
+#define OPAL_SENSOR_GROUP_ENABLE 163
+#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
+#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
+#define OPAL_HANDLE_HMI2 166
+#define OPAL_NX_COPROC_INIT 167
+#define OPAL_XIVE_GET_VP_STATE 170
+#define OPAL_MPIPL_UPDATE 173
+#define OPAL_MPIPL_REGISTER_TAG 174
+#define OPAL_MPIPL_QUERY_TAG 175
+#define OPAL_SECVAR_GET 176
+#define OPAL_SECVAR_GET_NEXT 177
+#define OPAL_SECVAR_ENQUEUE_UPDATE 178
+#define OPAL_LAST 178
+
+#define QUIESCE_HOLD 1 /* Spin all calls at entry */
+#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
+#define QUIESCE_LOCK_BREAK 3 /* Set to ignore locks. */
+#define QUIESCE_RESUME 4 /* Un-quiesce */
+#define QUIESCE_RESUME_FAST_REBOOT 5 /* Un-quiesce, fast reboot */
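As a rough sketch of how these codes pair with the opal_quiesce() entry point declared further below (the -1 "all CPUs" argument is an assumption here, as is the surrounding context):

	/* hold every OPAL call, e.g. around a crash/MPIPL sequence (sketch) */
	opal_quiesce(QUIESCE_HOLD, -1);		/* -1 = all CPUs (assumed) */
	/* ... critical section with firmware quiesced ... */
	opal_quiesce(QUIESCE_RESUME, -1);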
+
+/* Device tree flags */
+
+/*
+ * Flags set in power-mgmt nodes in device tree describing
+ * idle states that are supported in the platform.
+ */
+
+#define OPAL_PM_TIMEBASE_STOP 0x00000002
+#define OPAL_PM_LOSE_HYP_CONTEXT 0x00002000
+#define OPAL_PM_LOSE_FULL_CONTEXT 0x00004000
+#define OPAL_PM_NAP_ENABLED 0x00010000
+#define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+#define OPAL_PM_STOP_INST_FAST 0x00100000
+#define OPAL_PM_STOP_INST_DEEP 0x00200000
+
+/*
+ * OPAL_CONFIG_CPU_IDLE_STATE parameters
+ */
+#define OPAL_CONFIG_IDLE_FASTSLEEP 1
+#define OPAL_CONFIG_IDLE_UNDO 0
+#define OPAL_CONFIG_IDLE_APPLY 1
+
+#ifndef __ASSEMBLY__
+
+/* Other enums */
+enum OpalFreezeState {
+ OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+ OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+ OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+ OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+ OPAL_EEH_STOPPED_RESET = 4,
+ OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+ OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+
+enum OpalEehFreezeActionToken {
+ OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
+
+ OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
+};
+
+enum OpalPciStatusToken {
+ OPAL_EEH_NO_ERROR = 0,
+ OPAL_EEH_IOC_ERROR = 1,
+ OPAL_EEH_PHB_ERROR = 2,
+ OPAL_EEH_PE_ERROR = 3,
+ OPAL_EEH_PE_MMIO_ERROR = 4,
+ OPAL_EEH_PE_DMA_ERROR = 5
+};
+
+enum OpalPciErrorSeverity {
+ OPAL_EEH_SEV_NO_ERROR = 0,
+ OPAL_EEH_SEV_IOC_DEAD = 1,
+ OPAL_EEH_SEV_PHB_DEAD = 2,
+ OPAL_EEH_SEV_PHB_FENCED = 3,
+ OPAL_EEH_SEV_PE_ER = 4,
+ OPAL_EEH_SEV_INF = 5
+};
+
+enum OpalErrinjectType {
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
+};
+
+enum OpalErrinjectFunc {
+ /* IOA bus specific errors */
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
+};
+
+enum OpalMmioWindowType {
+ OPAL_M32_WINDOW_TYPE = 1,
+ OPAL_M64_WINDOW_TYPE = 2,
+ OPAL_IO_WINDOW_TYPE = 3
+};
+
+enum OpalExceptionHandler {
+ OPAL_MACHINE_CHECK_HANDLER = 1,
+ OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+ OPAL_SOFTPATCH_HANDLER = 3
+};
+
+enum OpalPendingState {
+ OPAL_EVENT_OPAL_INTERNAL = 0x1,
+ OPAL_EVENT_NVRAM = 0x2,
+ OPAL_EVENT_RTC = 0x4,
+ OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100,
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_DUMP_AVAIL = 0x400,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+enum OpalThreadStatus {
+ OPAL_THREAD_INACTIVE = 0x0,
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
+};
+
+enum OpalPciBusCompare {
+ OpalPciBusAny = 0, /* Any bus number match */
+ OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
+ OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
+ OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
+ OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
+ OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
+ OpalPciBusAll = 7, /* Match bus number exactly */
+};
+
+enum OpalDeviceCompare {
+ OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+ OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+ OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+ OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+ OPAL_UNMAP_PE = 0,
+ OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
+enum OpalM64Action {
+ OPAL_DISABLE_M64 = 0,
+ OPAL_ENABLE_M64_SPLIT = 1,
+ OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
+enum OpalPciResetScope {
+ OPAL_RESET_PHB_COMPLETE = 1,
+ OPAL_RESET_PCI_LINK = 2,
+ OPAL_RESET_PHB_ERROR = 3,
+ OPAL_RESET_PCI_HOT = 4,
+ OPAL_RESET_PCI_FUNDAMENTAL = 5,
+ OPAL_RESET_PCI_IODA_TABLE = 6
+};
+
+enum OpalPciReinitScope {
+ /*
+ * Note: we chose values that do not overlap
+ * OpalPciResetScope as OPAL v2 used the same
+ * enum for both
+ */
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
+};
+
+enum OpalPciSlotPresence {
+ OPAL_PCI_SLOT_EMPTY = 0,
+ OPAL_PCI_SLOT_PRESENT = 1
+};
+
+enum OpalPciSlotPower {
+ OPAL_PCI_SLOT_POWER_OFF = 0,
+ OPAL_PCI_SLOT_POWER_ON = 1,
+ OPAL_PCI_SLOT_OFFLINE = 2,
+ OPAL_PCI_SLOT_ONLINE = 3
+};
+
+enum OpalSlotLedType {
+ OPAL_SLOT_LED_TYPE_ID = 0, /* IDENTIFY LED */
+ OPAL_SLOT_LED_TYPE_FAULT = 1, /* FAULT LED */
+ OPAL_SLOT_LED_TYPE_ATTN = 2, /* System Attention LED */
+ OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+ OPAL_SLOT_LED_STATE_OFF = 0, /* LED is OFF */
+ OPAL_SLOT_LED_STATE_ON = 1 /* LED is ON */
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
+enum opal_msg_type {
+ OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
+ * additional params function-specific
+ */
+ OPAL_MSG_MEM_ERR = 1,
+ OPAL_MSG_EPOW = 2,
+ OPAL_MSG_SHUTDOWN = 3, /* params[0] = 1 reboot, 0 shutdown */
+ OPAL_MSG_HMI_EVT = 4,
+ OPAL_MSG_DPO = 5,
+ OPAL_MSG_PRD = 6,
+ OPAL_MSG_OCC = 7,
+ OPAL_MSG_PRD2 = 8,
+ OPAL_MSG_TYPE_MAX,
+};
+
+struct opal_msg {
+ __be32 msg_type;
+ __be32 reserved;
+ __be64 params[8];
+};
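A minimal sketch of decoding one of these messages on the host side; all fields arrive big-endian and must be byte-swapped, and the handler shape itself is illustrative:

	static void handle_opal_msg(struct opal_msg *msg)
	{
		if (be32_to_cpu(msg->msg_type) == OPAL_MSG_ASYNC_COMP) {
			u64 token = be64_to_cpu(msg->params[0]);
			s64 rc    = be64_to_cpu(msg->params[1]);

			/* match token against the outstanding async request */
		}
	}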
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+ OPAL_SYSPARAM_READ = 0x1,
+ OPAL_SYSPARAM_WRITE = 0x2,
+ OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+enum {
+ OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
+};
+
+struct opal_ipmi_msg {
+ uint8_t version;
+ uint8_t netfn;
+ uint8_t cmd;
+ uint8_t data[];
+};
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ __be16 flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
+/* HMI interrupt event */
+enum OpalHMI_Version {
+ OpalHMIEvt_V1 = 1,
+ OpalHMIEvt_V2 = 2,
+};
+
+enum OpalHMI_Severity {
+ OpalHMI_SEV_NO_ERROR = 0,
+ OpalHMI_SEV_WARNING = 1,
+ OpalHMI_SEV_ERROR_SYNC = 2,
+ OpalHMI_SEV_FATAL = 3,
+};
+
+enum OpalHMI_Disposition {
+ OpalHMI_DISPOSITION_RECOVERED = 0,
+ OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalHMI_ErrType {
+ OpalHMI_ERROR_MALFUNC_ALERT = 0,
+ OpalHMI_ERROR_PROC_RECOV_DONE,
+ OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
+ OpalHMI_ERROR_PROC_RECOV_MASKED,
+ OpalHMI_ERROR_TFAC,
+ OpalHMI_ERROR_TFMR_PARITY,
+ OpalHMI_ERROR_HA_OVERFLOW_WARN,
+ OpalHMI_ERROR_XSCOM_FAIL,
+ OpalHMI_ERROR_XSCOM_DONE,
+ OpalHMI_ERROR_SCOM_FIR,
+ OpalHMI_ERROR_DEBUG_TRIG_FIR,
+ OpalHMI_ERROR_HYP_RESOURCE,
+ OpalHMI_ERROR_CAPP_RECOVERY,
+};
+
+enum OpalHMI_XstopType {
+ CHECKSTOP_TYPE_UNKNOWN = 0,
+ CHECKSTOP_TYPE_CORE = 1,
+ CHECKSTOP_TYPE_NX = 2,
+ CHECKSTOP_TYPE_NPU = 3
+};
+
+enum OpalHMI_CoreXstopReason {
+ CORE_CHECKSTOP_IFU_REGFILE = 0x00000001,
+ CORE_CHECKSTOP_IFU_LOGIC = 0x00000002,
+ CORE_CHECKSTOP_PC_DURING_RECOV = 0x00000004,
+ CORE_CHECKSTOP_ISU_REGFILE = 0x00000008,
+ CORE_CHECKSTOP_ISU_LOGIC = 0x00000010,
+ CORE_CHECKSTOP_FXU_LOGIC = 0x00000020,
+ CORE_CHECKSTOP_VSU_LOGIC = 0x00000040,
+ CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE = 0x00000080,
+ CORE_CHECKSTOP_LSU_REGFILE = 0x00000100,
+ CORE_CHECKSTOP_PC_FWD_PROGRESS = 0x00000200,
+ CORE_CHECKSTOP_LSU_LOGIC = 0x00000400,
+ CORE_CHECKSTOP_PC_LOGIC = 0x00000800,
+ CORE_CHECKSTOP_PC_HYP_RESOURCE = 0x00001000,
+ CORE_CHECKSTOP_PC_HANG_RECOV_FAILED = 0x00002000,
+ CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED = 0x00004000,
+ CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ = 0x00008000,
+ CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ = 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+ NX_CHECKSTOP_SHM_INVAL_STATE_ERR = 0x00000001,
+ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1 = 0x00000002,
+ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2 = 0x00000004,
+ NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR = 0x00000008,
+ NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR = 0x00000010,
+ NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR = 0x00000020,
+ NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR = 0x00000040,
+ NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR = 0x00000080,
+ NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR = 0x00000100,
+ NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR = 0x00000200,
+ NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR = 0x00000400,
+ NX_CHECKSTOP_DMA_CRB_UE = 0x00000800,
+ NX_CHECKSTOP_DMA_CRB_SUE = 0x00001000,
+ NX_CHECKSTOP_PBI_ISN_UE = 0x00002000,
+};
+
+struct OpalHMIEvent {
+ uint8_t version; /* 0x00 */
+ uint8_t severity; /* 0x01 */
+ uint8_t type; /* 0x02 */
+ uint8_t disposition; /* 0x03 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ __be64 hmer;
+ /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
+ __be64 tfmr;
+
+ /* version 2 and later */
+ union {
+ /*
+ * checkstop info (Core/NX).
+ * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+ */
+ struct {
+ uint8_t xstop_type; /* enum OpalHMI_XstopType */
+ uint8_t reserved_1[3];
+ __be32 xstop_reason;
+ union {
+ __be32 pir; /* for CHECKSTOP_TYPE_CORE */
+ __be32 chip_id; /* for CHECKSTOP_TYPE_NX */
+ } u;
+ } xstop_error;
+ } u;
+};
+
+/* OPAL_HANDLE_HMI2 out_flags */
+enum {
+ OPAL_HMI_FLAGS_TB_RESYNC = (1ull << 0), /* Timebase has been resynced */
+ OPAL_HMI_FLAGS_DEC_LOST = (1ull << 1), /* DEC lost, needs to be reprogrammed */
+ OPAL_HMI_FLAGS_HDEC_LOST = (1ull << 2), /* HDEC lost, needs to be reprogrammed */
+ OPAL_HMI_FLAGS_TOD_TB_FAIL = (1ull << 3), /* TOD/TB recovery failed. */
+ OPAL_HMI_FLAGS_NEW_EVENT = (1ull << 63), /* An event has been created */
+};
+
+enum {
+ OPAL_P7IOC_DIAG_TYPE_NONE = 0,
+ OPAL_P7IOC_DIAG_TYPE_RGC = 1,
+ OPAL_P7IOC_DIAG_TYPE_BI = 2,
+ OPAL_P7IOC_DIAG_TYPE_CI = 3,
+ OPAL_P7IOC_DIAG_TYPE_MISC = 4,
+ OPAL_P7IOC_DIAG_TYPE_I2C = 5,
+ OPAL_P7IOC_DIAG_TYPE_LAST = 6
+};
+
+struct OpalIoP7IOCErrorData {
+ __be16 type;
+
+ /* GEM */
+ __be64 gemXfir;
+ __be64 gemRfir;
+ __be64 gemRirqfir;
+ __be64 gemMask;
+ __be64 gemRwof;
+
+ /* LEM */
+ __be64 lemFir;
+ __be64 lemErrMask;
+ __be64 lemAction0;
+ __be64 lemAction1;
+ __be64 lemWof;
+
+ union {
+ struct OpalIoP7IOCRgcErrorData {
+ __be64 rgcStatus; /* 3E1C10 */
+ __be64 rgcLdcp; /* 3E1C18 */
+	} rgc;
+ struct OpalIoP7IOCBiErrorData {
+ __be64 biLdcp0; /* 3C0100, 3C0118 */
+ __be64 biLdcp1; /* 3C0108, 3C0120 */
+ __be64 biLdcp2; /* 3C0110, 3C0128 */
+ __be64 biFenceStatus; /* 3C0130, 3C0130 */
+
+ uint8_t biDownbound; /* BI Downbound or Upbound */
+	} bi;
+ struct OpalIoP7IOCCiErrorData {
+ __be64 ciPortStatus; /* 3Dn008 */
+ __be64 ciPortLdcp; /* 3Dn010 */
+
+ uint8_t ciPort; /* Index of CI port: 0/1 */
+	} ci;
+ };
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+ OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB4 = 3
+};
+
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256,
+ OPAL_PHB4_NUM_PEST_REGS = 512
+};
+
+struct OpalIoPhbErrorCommon {
+ __be32 version;
+ __be32 ioType;
+ __be32 len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ // P7IOC utl regs
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ // P7IOC cfg regs
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ // cfg AER regs
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ // Record data about the call to allocate a buffer.
+ __be64 errorClass;
+ __be64 correlator;
+
+ //P7IOC MMIO Error Regs
+ __be64 p7iocPlssr; // n120
+ __be64 p7iocCsr; // n110
+ __be64 lemFir; // nC00
+ __be64 lemErrorMask; // nC18
+ __be64 lemWOF; // nC40
+ __be64 phbErrorStatus; // nC80
+ __be64 phbFirstErrorStatus; // nC88
+ __be64 phbErrorLog0; // nCC0
+ __be64 phbErrorLog1; // nCC8
+ __be64 mmioErrorStatus; // nD00
+ __be64 mmioFirstErrorStatus; // nD08
+ __be64 mmioErrorLog0; // nD40
+ __be64 mmioErrorLog1; // nD48
+ __be64 dma0ErrorStatus; // nD80
+ __be64 dma0FirstErrorStatus; // nD88
+ __be64 dma0ErrorLog0; // nDC0
+ __be64 dma0ErrorLog1; // nDC8
+ __be64 dma1ErrorStatus; // nE00
+ __be64 dma1FirstErrorStatus; // nE08
+ __be64 dma1ErrorLog0; // nE40
+ __be64 dma1ErrorLog1; // nE48
+ __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB3 UTL regs */
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ /* PHB3 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ __be64 errorClass;
+ __be64 correlator;
+
+ /* PHB3 MMIO Error Regs */
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 mmioErrorStatus; /* D00 */
+ __be64 mmioFirstErrorStatus; /* D08 */
+ __be64 mmioErrorLog0; /* D40 */
+ __be64 mmioErrorLog1; /* D48 */
+ __be64 dma0ErrorStatus; /* D80 */
+ __be64 dma0FirstErrorStatus; /* D88 */
+ __be64 dma0ErrorLog0; /* DC0 */
+ __be64 dma0ErrorLog1; /* DC8 */
+ __be64 dma1ErrorStatus; /* E00 */
+ __be64 dma1FirstErrorStatus; /* E08 */
+ __be64 dma1ErrorLog0; /* E40 */
+ __be64 dma1ErrorLog1; /* E48 */
+ __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb4ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB4 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ /* PHB4 ETU Error Regs */
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 phbTxeErrorStatus; /* D00 */
+ __be64 phbTxeFirstErrorStatus; /* D08 */
+ __be64 phbTxeErrorLog0; /* D40 */
+ __be64 phbTxeErrorLog1; /* D48 */
+ __be64 phbRxeArbErrorStatus; /* D80 */
+ __be64 phbRxeArbFirstErrorStatus; /* D88 */
+ __be64 phbRxeArbErrorLog0; /* DC0 */
+ __be64 phbRxeArbErrorLog1; /* DC8 */
+ __be64 phbRxeMrgErrorStatus; /* E00 */
+ __be64 phbRxeMrgFirstErrorStatus; /* E08 */
+ __be64 phbRxeMrgErrorLog0; /* E40 */
+ __be64 phbRxeMrgErrorLog1; /* E48 */
+ __be64 phbRxeTceErrorStatus; /* E80 */
+ __be64 phbRxeTceFirstErrorStatus; /* E88 */
+ __be64 phbRxeTceErrorLog0; /* EC0 */
+ __be64 phbRxeTceErrorLog1; /* EC8 */
+
+ /* PHB4 REGB Error Regs */
+ __be64 phbPblErrorStatus; /* 1900 */
+ __be64 phbPblFirstErrorStatus; /* 1908 */
+ __be64 phbPblErrorLog0; /* 1940 */
+ __be64 phbPblErrorLog1; /* 1948 */
+ __be64 phbPcieDlpErrorLog1; /* 1AA0 */
+ __be64 phbPcieDlpErrorLog2; /* 1AA8 */
+ __be64 phbPcieDlpErrorStatus; /* 1AB0 */
+ __be64 phbRegbErrorStatus; /* 1C00 */
+ __be64 phbRegbFirstErrorStatus; /* 1C08 */
+ __be64 phbRegbErrorLog0; /* 1C40 */
+ __be64 phbRegbErrorLog1; /* 1C48 */
+
+ __be64 pestA[OPAL_PHB4_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB4_NUM_PEST_REGS];
+};
+
+enum {
+ OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
+ OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+
+ /* These two define the base MMU mode of the host on P9
+ *
+	 * On P9 Nimbus DD2.0 and Cumulus (and later), KVM can still
+ * create hash guests in "radix" mode with care (full core
+ * switch only).
+ */
+ OPAL_REINIT_CPUS_MMU_HASH = (1 << 2),
+ OPAL_REINIT_CPUS_MMU_RADIX = (1 << 3),
+
+ OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED = (1 << 4),
+};
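These flags are meant to be ORed together; a hedged example of what a little-endian radix host would plausibly request (rc and error handling omitted):

	/* LE interrupt entry plus radix MMU mode, in one call (sketch) */
	rc = opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE |
			      OPAL_REINIT_CPUS_MMU_RADIX);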
+
+typedef struct oppanel_line {
+ __be64 line;
+ __be64 line_len;
+} oppanel_line_t;
+
+enum opal_prd_msg_type {
+ OPAL_PRD_MSG_TYPE_INIT = 0, /* HBRT --> OPAL */
+ OPAL_PRD_MSG_TYPE_FINI, /* HBRT/kernel --> OPAL */
+ OPAL_PRD_MSG_TYPE_ATTN, /* HBRT <-- OPAL */
+ OPAL_PRD_MSG_TYPE_ATTN_ACK, /* HBRT --> OPAL */
+ OPAL_PRD_MSG_TYPE_OCC_ERROR, /* HBRT <-- OPAL */
+ OPAL_PRD_MSG_TYPE_OCC_RESET, /* HBRT <-- OPAL */
+};
+
+struct opal_prd_msg_header {
+ uint8_t type;
+ uint8_t pad[1];
+ __be16 size;
+};
+
+struct opal_prd_msg;
+
+#define OCC_RESET 0
+#define OCC_LOAD 1
+#define OCC_THROTTLE 2
+#define OCC_MAX_THROTTLE_STATUS 5
+
+struct opal_occ_msg {
+ __be64 type;
+ __be64 chip;
+ __be64 throttle_status;
+};
+
+/*
+ * SG entries
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned, *and* the size of
+ * every block except the last in the list must be 4k aligned too.
+ */
+struct opal_sg_entry {
+ __be64 data;
+ __be64 length;
+};
+
+/*
+ * Candidate image SG list.
+ *
+ * length = VER | length
+ */
+struct opal_sg_list {
+ __be64 length;
+ __be64 next;
+ struct opal_sg_entry entry[];
+};
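A hedged sketch of walking such a list. It assumes length encodes the 16-byte header plus the used entries, as opal_vmalloc_to_sg_list() in opal.h suggests; use_block() and first are hypothetical, and the conversion of the next real address back to a kernel pointer is glossed over:

	struct opal_sg_list *sg;
	unsigned int i, nr;

	for (sg = first; sg; sg = (void *)be64_to_cpu(sg->next)) {
		/* low part of length = header + used entries (assumed) */
		nr = (be64_to_cpu(sg->length) - 16) /
			sizeof(struct opal_sg_entry);
		for (i = 0; i < nr; i++)
			use_block(be64_to_cpu(sg->entry[i].data),
				  be64_to_cpu(sg->entry[i].length));
	}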
+
+/*
+ * Dump region ID range usable by the OS
+ */
+#define OPAL_DUMP_REGION_HOST_START 0x80
+#define OPAL_DUMP_REGION_LOG_BUF 0x80
+#define OPAL_DUMP_REGION_HOST_END 0xFF
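For instance, the OS can claim OPAL_DUMP_REGION_LOG_BUF for its log buffer; a sketch following in-kernel usage, where log_buf and log_buf_len stand in for the printk buffer and the third argument is passed as a length:

	s64 rc;

	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(log_buf), log_buf_len);
	if (rc != OPAL_SUCCESS)
		pr_warn("dump region registration failed, rc=%lld\n", rc);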
+
+/* CAPI modes for PHB */
+enum {
+ OPAL_PHB_CAPI_MODE_PCIE = 0,
+ OPAL_PHB_CAPI_MODE_CAPI = 1,
+ OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
+ OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+ OPAL_PHB_CAPI_MODE_DMA = 4,
+ OPAL_PHB_CAPI_MODE_DMA_TVT1 = 5,
+};
+
+/* OPAL I2C request */
+struct opal_i2c_request {
+ uint8_t type;
+#define OPAL_I2C_RAW_READ 0
+#define OPAL_I2C_RAW_WRITE 1
+#define OPAL_I2C_SM_READ 2
+#define OPAL_I2C_SM_WRITE 3
+ uint8_t flags;
+#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
+ uint8_t subaddr_sz; /* Max 4 */
+ uint8_t reserved;
+ __be16 addr; /* 7 or 10 bit address */
+ __be16 reserved2;
+ __be32 subaddr; /* Sub-address if any */
+ __be32 size; /* Data size */
+ __be64 buffer_ra; /* Buffer real address */
+};
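A hedged sketch of filling this request for an SMBus-style read with a one-byte sub-address; the bus id, device address, token, buf and len are all placeholders:

	struct opal_i2c_request req = {
		.type	    = OPAL_I2C_SM_READ,
		.addr	    = cpu_to_be16(0x50),	/* hypothetical 7-bit address */
		.subaddr_sz = 1,
		.subaddr    = cpu_to_be32(0),
		.size	    = cpu_to_be32(len),
		.buffer_ra  = cpu_to_be64(__pa(buf)),	/* buffer real address */
	};

	rc = opal_i2c_request(async_token, bus_id, &req);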
+
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host passes OPAL a buffer of OPAL_SYSEPOW_MAX elements, each
+ * 16 bits wide, to fetch the system-wide EPOW status. Each element
+ * holds the status bits for one EPOW sub class as defined here, so
+ * multiple detailed EPOW status bits for a given sub class are
+ * packed into a single buffer element. (A usage sketch follows the
+ * enums below.)
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+ OPAL_SYSEPOW_POWER = 0, /* Power EPOW */
+ OPAL_SYSEPOW_TEMP = 1, /* Temperature EPOW */
+ OPAL_SYSEPOW_COOLING = 2, /* Cooling EPOW */
+ OPAL_SYSEPOW_MAX = 3, /* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+ OPAL_SYSPOWER_UPS = 0x0001, /* System on UPS power */
+ OPAL_SYSPOWER_CHNG = 0x0002, /* System power config change */
+ OPAL_SYSPOWER_FAIL = 0x0004, /* System impending power failure */
+ OPAL_SYSPOWER_INCL = 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+ OPAL_SYSTEMP_AMB = 0x0001, /* System over ambient temperature */
+ OPAL_SYSTEMP_INT = 0x0002, /* System over internal temperature */
+ OPAL_SYSTEMP_HMD = 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+ OPAL_SYSCOOL_INSF = 0x0001, /* System insufficient cooling */
+};
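Putting the three sub classes together, a host-side query might look like this sketch (opal_get_epow_status() is declared in opal.h further below):

	__be16 epow[OPAL_SYSEPOW_MAX];
	__be16 nr_classes;

	if (opal_get_epow_status(epow, &nr_classes) == OPAL_SUCCESS &&
	    be16_to_cpu(nr_classes) > OPAL_SYSEPOW_POWER &&
	    (be16_to_cpu(epow[OPAL_SYSEPOW_POWER]) & OPAL_SYSPOWER_UPS))
		pr_info("system is running on UPS power\n");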
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+ OPAL_REBOOT_NORMAL = 0,
+ OPAL_REBOOT_PLATFORM_ERROR = 1,
+ OPAL_REBOOT_FULL_IPL = 2,
+ OPAL_REBOOT_MPIPL = 3,
+ OPAL_REBOOT_FAST = 4,
+};
+
+/* Argument to OPAL_PCI_TCE_KILL */
+enum {
+ OPAL_PCI_TCE_KILL_PAGES,
+ OPAL_PCI_TCE_KILL_PE,
+ OPAL_PCI_TCE_KILL_ALL,
+};
+
+/* The xive operation mode indicates the active "API" and
+ * corresponds to the "mode" parameter of the opal_xive_reset()
+ * call
+ */
+enum {
+ OPAL_XIVE_MODE_EMU = 0,
+ OPAL_XIVE_MODE_EXPL = 1,
+};
+
+/* Flags for OPAL_XIVE_GET_IRQ_INFO */
+enum {
+ OPAL_XIVE_IRQ_TRIGGER_PAGE = 0x00000001,
+ OPAL_XIVE_IRQ_STORE_EOI = 0x00000002,
+ OPAL_XIVE_IRQ_LSI = 0x00000004,
+ OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_STORE_EOI2 = 0x00000040,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
+enum {
+ OPAL_XIVE_EQ_ENABLED = 0x00000001,
+ OPAL_XIVE_EQ_ALWAYS_NOTIFY = 0x00000002,
+ OPAL_XIVE_EQ_ESCALATE = 0x00000004,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_VP_INFO */
+enum {
+ OPAL_XIVE_VP_ENABLED = 0x00000001,
+ OPAL_XIVE_VP_SINGLE_ESCALATION = 0x00000002,
+};
+
+/* "Any chip" replacement for chip ID for allocation functions */
+enum {
+ OPAL_XIVE_ANY_CHIP = 0xffffffff,
+};
+
+/* Xive sync options */
+enum {
+	/* These bits are cumulative; the argument is a girq */
+ XIVE_SYNC_EAS = 0x00000001, /* Sync irq source */
+ XIVE_SYNC_QUEUE = 0x00000002, /* Sync irq target */
+};
+
+/* Dump options */
+enum {
+ XIVE_DUMP_TM_HYP = 0,
+ XIVE_DUMP_TM_POOL = 1,
+ XIVE_DUMP_TM_OS = 2,
+ XIVE_DUMP_TM_USER = 3,
+ XIVE_DUMP_VP = 4,
+ XIVE_DUMP_EMU_STATE = 5,
+};
+
+/* "type" argument options for OPAL_IMC_COUNTERS_* calls */
+enum {
+ OPAL_IMC_COUNTERS_NEST = 1,
+ OPAL_IMC_COUNTERS_CORE = 2,
+ OPAL_IMC_COUNTERS_TRACE = 3,
+};
+
+
+/* PCI p2p descriptor */
+#define OPAL_PCI_P2P_ENABLE 0x1
+#define OPAL_PCI_P2P_LOAD 0x2
+#define OPAL_PCI_P2P_STORE 0x4
+
+/* MPIPL update operations */
+enum opal_mpipl_ops {
+ OPAL_MPIPL_ADD_RANGE = 0,
+ OPAL_MPIPL_REMOVE_RANGE = 1,
+ OPAL_MPIPL_REMOVE_ALL = 2,
+ OPAL_MPIPL_FREE_PRESERVED_MEMORY = 3,
+};
+
+/* Each tag points to a metadata area; the kernel uses the
+ * tag to retrieve the corresponding metadata value.
+ */
+enum opal_mpipl_tags {
+ OPAL_MPIPL_TAG_CPU = 0,
+ OPAL_MPIPL_TAG_OPAL = 1,
+ OPAL_MPIPL_TAG_KERNEL = 2,
+ OPAL_MPIPL_TAG_BOOT_MEM = 3,
+};
+
+/* Preserved memory details */
+struct opal_mpipl_region {
+ __be64 src;
+ __be64 dest;
+ __be64 size;
+};
+
+/* Structure version */
+#define OPAL_MPIPL_VERSION 0x01
+
+struct opal_mpipl_fadump {
+ u8 version;
+ u8 reserved[7];
+ __be32 crashing_pir; /* OPAL crashing CPU PIR */
+ __be32 cpu_data_version;
+ __be32 cpu_data_size;
+ __be32 region_cnt;
+ struct opal_mpipl_region region[];
+} __packed;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
new file mode 100644
index 000000000..726125a53
--- /dev/null
+++ b/arch/powerpc/include/asm/opal.h
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PowerNV OPAL definitions.
+ *
+ * Copyright 2011 IBM Corp.
+ */
+
+#ifndef _ASM_POWERPC_OPAL_H
+#define _ASM_POWERPC_OPAL_H
+
+#include <asm/opal-api.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+
+/* We calculate the number of SG entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS 10
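This delay underpins the usual retry loop around calls that can return OPAL_BUSY or OPAL_BUSY_EVENT; a minimal sketch, with opal_rtc_write() as an arbitrary example and ymd/hms as placeholders:

	s64 rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_rtc_write(ymd, hms);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);	/* let firmware make progress */
		if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT)
			mdelay(OPAL_BUSY_DELAY_MS);
	}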
+
+/* /sys/firmware/opal */
+extern struct kobject *opal_kobj;
+
+/* /ibm,opal */
+extern struct device_node *opal_node;
+
+/* API functions */
+int64_t opal_invalid_call(void);
+int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
+ uint64_t lpcr);
+int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn,
+ uint64_t addr, uint64_t PE_mask);
+int64_t opal_npu_spa_clear_cache(uint64_t phb_id, uint32_t bdfn,
+ uint64_t PE_handle);
+int64_t opal_npu_tl_set(uint64_t phb_id, uint32_t bdfn, long cap,
+ uint64_t rate_phys, uint32_t size);
+
+int64_t opal_console_write(int64_t term_number, __be64 *length,
+ const uint8_t *buffer);
+int64_t opal_console_read(int64_t term_number, __be64 *length,
+ uint8_t *buffer);
+int64_t opal_console_write_buffer_space(int64_t term_number,
+ __be64 *length);
+int64_t opal_console_flush(int64_t term_number);
+int64_t opal_rtc_read(__be32 *year_month_day,
+ __be64 *hour_minute_second_millisecond);
+int64_t opal_rtc_write(uint32_t year_month_day,
+ uint64_t hour_minute_second_millisecond);
+int64_t opal_tpo_read(uint64_t token, __be32 *year_mon_day, __be32 *hour_min);
+int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
+ uint32_t hour_min);
+int64_t opal_cec_power_down(uint64_t request);
+int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, const char *diag);
+int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
+int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
+ uint64_t tce_mem_size);
+int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
+ uint64_t tce_mem_size);
+int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint8_t *data);
+int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, __be16 *data);
+int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, __be32 *data);
+int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint8_t data);
+int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint16_t data);
+int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint32_t data);
+int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
+int64_t opal_register_exception_handler(uint64_t opal_exception,
+ uint64_t handler_address,
+ uint64_t glue_cache_line);
+int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
+ uint8_t *freeze_state,
+ __be16 *pci_error_type,
+ __be64 *phb_status);
+int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
+ uint64_t eeh_action_token);
+int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number,
+ uint64_t eeh_action_token);
+int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no, uint32_t type,
+ uint32_t func, uint64_t addr, uint64_t mask);
+int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
+
+int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type,
+ uint16_t window_num, uint16_t enable);
+int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type,
+ uint16_t window_num,
+ uint64_t starting_real_address,
+ uint64_t starting_pci_address,
+ uint64_t size);
+int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
+ uint16_t window_type, uint16_t window_num,
+ uint16_t segment_num);
+int64_t opal_pci_set_phb_table_memory(uint64_t phb_id, uint64_t rtt_addr,
+ uint64_t ivt_addr, uint64_t ivt_len,
+ uint64_t reject_array_addr,
+ uint64_t peltv_addr);
+int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func,
+ uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare,
+ uint8_t pe_action);
+int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe,
+ uint8_t state);
+int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint32_t pe_number);
+int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number,
+ uint32_t state);
+int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+ uint8_t *p_bit, uint8_t *q_bit);
+int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+ uint8_t p_bit, uint8_t q_bit);
+int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
+int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+ uint32_t xive_num);
+int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
+ __be32 *interrupt_source_number);
+int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
+ uint8_t msi_range, __be32 *msi_address,
+ __be32 *message_data);
+int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ __be64 *msi_address, __be32 *message_data);
+int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
+int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
+int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
+int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t window_id,
+ uint16_t tce_levels, uint64_t tce_table_addr,
+ uint64_t tce_table_size, uint64_t tce_page_size);
+int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
+ uint16_t dma_window_number, uint64_t pci_start_addr,
+ uint64_t pci_mem_size);
+int64_t opal_pci_reset(uint64_t id, uint8_t reset_scope, uint8_t assert_state);
+
+int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_fence_phb(uint64_t phb_id);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
+int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
+int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
+int64_t opal_set_system_attention_led(uint8_t led_action);
+int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
+ __be16 *pci_error_type, __be16 *severity);
+int64_t opal_pci_poll(uint64_t id);
+int64_t opal_return_cpu(void);
+int64_t opal_check_token(uint64_t token);
+int64_t opal_reinit_cpus(uint64_t flags);
+
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, __be32 *data, uint32_t sz);
+
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
+int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_send_ack_elog(uint64_t log_id);
+void opal_resend_pending_logs(void);
+
+int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
+int64_t opal_manage_flash(uint8_t op);
+int64_t opal_update_flash(uint64_t blk_list);
+int64_t opal_dump_init(uint8_t dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
+int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
+int64_t opal_dump_ack(uint32_t dump_id);
+int64_t opal_dump_resend_notification(void);
+
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_write_oppanel_async(uint64_t token, oppanel_line_t *lines,
+ uint64_t num_lines);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+ uint64_t length);
+int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+ uint64_t length);
+int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
+int64_t opal_sensor_read_u64(u32 sensor_hndl, int token, __be64 *sensor_data);
+int64_t opal_handle_hmi(void);
+int64_t opal_handle_hmi2(__be64 *out_flags);
+int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
+int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
+int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
+int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
+int64_t opal_pci_get_pbcq_tunnel_bar(uint64_t phb_id, uint64_t *addr);
+int64_t opal_pci_set_pbcq_tunnel_bar(uint64_t phb_id, uint64_t addr);
+int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
+ uint64_t msg_len);
+int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
+ uint64_t *msg_len);
+int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
+ struct opal_i2c_request *oreq);
+int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+ __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+ const u64 led_value, __be64 *max_led_type);
+
+int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
+ uint64_t token);
+int64_t opal_get_device_tree(uint32_t phandle, uint64_t buf, uint64_t len);
+int64_t opal_pci_get_presence_state(uint64_t id, uint64_t data);
+int64_t opal_pci_get_power_state(uint64_t id, uint64_t data);
+int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
+ uint64_t data);
+int64_t opal_pci_poll2(uint64_t id, uint64_t data);
+
+int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_set_cppr(uint8_t cppr);
+int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+ uint32_t pe_num, uint32_t tce_size,
+ uint64_t dma_addr, uint32_t npages);
+int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr);
+int64_t opal_xive_reset(uint64_t version);
+int64_t opal_xive_get_irq_info(uint32_t girq,
+ __be64 *out_flags,
+ __be64 *out_eoi_page,
+ __be64 *out_trig_page,
+ __be32 *out_esb_shift,
+ __be32 *out_src_chip);
+int64_t opal_xive_get_irq_config(uint32_t girq, __be64 *out_vp,
+ uint8_t *out_prio, __be32 *out_lirq);
+int64_t opal_xive_set_irq_config(uint32_t girq, uint64_t vp, uint8_t prio,
+ uint32_t lirq);
+int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
+ __be64 *out_qpage,
+ __be64 *out_qsize,
+ __be64 *out_qeoi_page,
+ __be32 *out_escalate_irq,
+ __be64 *out_qflags);
+int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
+ uint64_t qpage,
+ uint64_t qsize,
+ uint64_t qflags);
+int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr);
+int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
+int64_t opal_xive_free_vp_block(uint64_t vp);
+int64_t opal_xive_get_vp_info(uint64_t vp,
+ __be64 *out_flags,
+ __be64 *out_cam_value,
+ __be64 *out_report_cl_pair,
+ __be32 *out_chip_id);
+int64_t opal_xive_set_vp_info(uint64_t vp,
+ uint64_t flags,
+ uint64_t report_cl_pair);
+int64_t opal_xive_allocate_irq_raw(uint32_t chip_id);
+int64_t opal_xive_free_irq(uint32_t girq);
+int64_t opal_xive_sync(uint32_t type, uint32_t id);
+int64_t opal_xive_dump(uint32_t type, uint32_t id);
+int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
+ __be32 *out_qtoggle,
+ __be32 *out_qindex);
+int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
+ uint32_t qtoggle,
+ uint32_t qindex);
+int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
+
+int64_t opal_imc_counters_init(uint32_t type, uint64_t address,
+ uint64_t cpu_pir);
+int64_t opal_imc_counters_start(uint32_t type, uint64_t cpu_pir);
+int64_t opal_imc_counters_stop(uint32_t type, uint64_t cpu_pir);
+
+int opal_get_powercap(u32 handle, int token, u32 *pcap);
+int opal_set_powercap(u32 handle, int token, u32 pcap);
+int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
+int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
+int opal_sensor_group_clear(u32 group_hndl, int token);
+int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
+int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
+
+int opal_secvar_get(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+int opal_secvar_get_next(const char *key, uint64_t *key_len,
+ uint64_t key_buf_size);
+int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+
+s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
+s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
+s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr);
+
+s64 opal_signal_system_reset(s32 cpu);
+s64 opal_quiesce(u64 shutdown_type, s32 cpu);
+
+/* Internal functions */
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+ int depth, void *data);
+extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
+ const char *uname, int depth, void *data);
+void __init opal_configure_cores(void);
+
+extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
+extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_flush_chars(uint32_t vtermno, bool wait);
+extern int opal_flush_console(uint32_t vtermno);
+
+extern void hvc_opal_init_early(void);
+
+extern int opal_message_notifier_register(enum opal_msg_type msg_type,
+ struct notifier_block *nb);
+extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb);
+
+extern int opal_async_get_token_interruptible(void);
+extern int opal_async_release_token(int token);
+extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
+extern int opal_async_wait_response_interruptible(uint64_t token,
+ struct opal_msg *msg);
+extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
+extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
+extern int sensor_group_enable(u32 grp_hndl, bool enable);
+
+struct rtc_time;
+extern time64_t opal_get_boot_time(void);
+extern void opal_nvram_init(void);
+extern void opal_flash_update_init(void);
+extern void opal_flash_update_print_message(void);
+extern int opal_elog_init(void);
+extern void opal_platform_dump_init(void);
+extern void opal_sys_param_init(void);
+extern void opal_msglog_init(void);
+extern void opal_msglog_sysfs_init(void);
+extern int opal_async_comp_init(void);
+extern int opal_sensor_init(void);
+extern int opal_hmi_handler_init(void);
+extern int opal_event_init(void);
+int opal_power_control_init(void);
+
+extern int opal_machine_check(struct pt_regs *regs);
+extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
+extern int opal_hmi_exception_early(struct pt_regs *regs);
+extern int opal_hmi_exception_early2(struct pt_regs *regs);
+extern int opal_handle_hmi_exception(struct pt_regs *regs);
+
+extern void opal_shutdown(void);
+extern int opal_resync_timebase(void);
+
+extern void opal_lpc_init(void);
+
+extern void opal_kmsg_init(void);
+
+extern int opal_event_request(unsigned int opal_event_nr);
+
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
+extern int opal_error_code(int rc);
+
+ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
+
+static inline int opal_get_async_rc(struct opal_msg msg)
+{
+ if (msg.msg_type != OPAL_MSG_ASYNC_COMP)
+ return OPAL_PARAMETER;
+ else
+ return be64_to_cpu(msg.params[1]);
+}
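Tying this helper to the async token API above, the usual request lifecycle looks roughly like the following; the sensor handle and data variable are illustrative:

	struct opal_msg msg;
	__be32 data;
	int token, rc;

	token = opal_async_get_token_interruptible();
	if (token < 0)
		return token;

	rc = opal_sensor_read(sensor_hndl, token, &data);
	if (rc == OPAL_ASYNC_COMPLETION) {
		rc = opal_async_wait_response(token, &msg);
		if (!rc)
			rc = opal_get_async_rc(msg);
	}
	opal_async_release_token(token);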
+
+void opal_wake_poller(void);
+
+void opal_powercap_init(void);
+void opal_psr_init(void);
+void opal_sensor_groups_init(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
new file mode 100644
index 000000000..183b5a251
--- /dev/null
+++ b/arch/powerpc/include/asm/paca.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This control block defines the PACA, which holds the processor-
+ * specific data for each logical processor on the system.
+ * There are some pointers defined that are utilized by PLIC.
+ *
+ * C 2001 PPC 64 Team, IBM Corp
+ */
+#ifndef _ASM_POWERPC_PACA_H
+#define _ASM_POWERPC_PACA_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC64
+
+#include <linux/cache.h>
+#include <linux/string.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+#ifdef CONFIG_PPC_BOOK3E_64
+#include <asm/exception-64e.h>
+#else
+#include <asm/exception-64s.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_asm.h>
+#endif
+#include <asm/accounting.h>
+#include <asm/hmi.h>
+#include <asm/cpuidle.h>
+#include <asm/atomic.h>
+#include <asm/mce.h>
+
+#include <asm-generic/mmiowb_types.h>
+
+register struct paca_struct *local_paca asm("r13");
+
+#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
+extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+/*
+ * Add standard checks that preemption cannot occur when using get_paca():
+ * otherwise the paca_struct it points to may be the wrong one just after.
+ */
+#define get_paca() ((void) debug_smp_processor_id(), local_paca)
+#else
+#define get_paca() local_paca
+#endif
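For example, a caller outside atomic context wraps the access in a preempt-disabled region so the underlying paca cannot change mid-use:

	preempt_disable();
	pr_info("logical cpu %u\n", get_paca()->paca_index);
	preempt_enable();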
+
+#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
+
+struct task_struct;
+struct rtas_args;
+struct lppaca;
+
+/*
+ * Defines the layout of the paca.
+ *
+ * This structure is not directly accessed by firmware or the service
+ * processor.
+ */
+struct paca_struct {
+#ifdef CONFIG_PPC_PSERIES
+ /*
+ * Because hw_cpu_id, unlike other paca fields, is accessed
+ * routinely from other CPUs (from the IRQ code), we stick to
+ * read-only (after boot) fields in the first cacheline to
+ * avoid cacheline bouncing.
+ */
+
+ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
+#endif /* CONFIG_PPC_PSERIES */
+
+ /*
+ * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
+ * load lock_token and paca_index with a single lwz
+ * instruction. They must travel together and be properly
+ * aligned.
+ */
+#ifdef __BIG_ENDIAN__
+ u16 lock_token; /* Constant 0x8000, used in locks */
+ u16 paca_index; /* Logical processor number */
+#else
+ u16 paca_index; /* Logical processor number */
+ u16 lock_token; /* Constant 0x8000, used in locks */
+#endif
+
+ u64 kernel_toc; /* Kernel TOC address */
+ u64 kernelbase; /* Base address of kernel */
+ u64 kernel_msr; /* MSR while running in kernel */
+ void *emergency_sp; /* pointer to emergency stack */
+ u64 data_offset; /* per cpu data offset */
+ s16 hw_cpu_id; /* Physical processor number */
+ u8 cpu_start; /* At startup, processor spins until */
+ /* this becomes non-zero. */
+ u8 kexec_state; /* set when kexec down has irqs off */
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ struct slb_shadow *slb_shadow_ptr;
+#endif
+ struct dtl_entry *dispatch_log;
+ struct dtl_entry *dispatch_log_end;
+#endif
+ u64 dscr_default; /* per-CPU default DSCR */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Now, starting in cacheline 2, the exception save areas
+ */
+ /* used for most interrupts/exceptions */
+ u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ /* SLB related definitions */
+ u16 vmalloc_sllp;
+ u8 slb_cache_ptr;
+ u8 stab_rr; /* stab/slb round-robin counter */
+#ifdef CONFIG_DEBUG_VM
+ u8 in_kernel_slb_handler;
+#endif
+ u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */
+ u32 slb_kern_bitmap;
+ u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ u64 exgen[8] __aligned(0x40);
+ /* Keep pgd in the same cacheline as the start of extlb */
+ pgd_t *pgd __aligned(0x40); /* Current PGD */
+ pgd_t *kernel_pgd; /* Kernel PGD */
+
+ /* Shared by all threads of a core -- points to tcd of first thread */
+ struct tlb_core_data *tcd_ptr;
+
+ /*
+ * We can have up to 3 levels of reentrancy in the TLB miss handler,
+ * in each of four exception levels (normal, crit, mcheck, debug).
+ */
+ u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
+ u64 exmc[8]; /* used for machine checks */
+ u64 excrit[8]; /* used for crit interrupts */
+ u64 exdbg[8]; /* used for debug interrupts */
+
+ /* Kernel stack pointers for use by special exceptions */
+ void *mc_kstack;
+ void *crit_kstack;
+ void *dbg_kstack;
+
+ struct tlb_core_data tcd;
+#endif /* CONFIG_PPC_BOOK3E_64 */
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
+ unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
+#endif
+
+ /*
+ * then miscellaneous read-write fields
+ */
+ struct task_struct *__current; /* Pointer to current */
+ u64 kstack; /* Saved Kernel stack addr */
+ u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
+ u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
+#ifdef CONFIG_PPC_BOOK3E_64
+ u16 trap_save; /* Used when bad stack is encountered */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
+ u8 irq_soft_mask; /* mask for irq soft masking */
+ u8 irq_happened; /* irq happened while soft-disabled */
+	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disabled */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ u8 pmcregs_in_use; /* pseries puts this in lppaca */
+#endif
+ u64 sprg_vdso; /* Saved user-visible sprg */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_scratch; /* TM scratch area for reclaim */
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+ /* PowerNV idle fields */
+ /* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */
+ unsigned long idle_state;
+ union {
+ /* P7/P8 specific fields */
+ struct {
+ /* PNV_THREAD_RUNNING/NAP/SLEEP */
+ u8 thread_idle_state;
+ /* Mask to denote subcore sibling threads */
+ u8 subcore_sibling_mask;
+ };
+
+ /* P9 specific fields */
+ struct {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* The PSSCR value that the kernel requested before going to stop */
+ u64 requested_psscr;
+ /* Flag to request this thread not to stop */
+ atomic_t dont_stop;
+#endif
+ };
+ };
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Non-maskable exceptions that are not performance critical */
+ u64 exnmi[EX_SIZE]; /* used for system reset (nmi) */
+ u64 exmc[EX_SIZE]; /* used for machine checks */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Exclusive stacks for system reset and machine check exception. */
+ void *nmi_emergency_sp;
+ void *mc_emergency_sp;
+
+ u16 in_nmi; /* In nmi handler */
+
+ /*
+ * Flag to check whether we are in machine check early handler
+ * and already using emergency stack.
+ */
+ u16 in_mce;
+ u8 hmi_event_available; /* HMI event is available */
+ u8 hmi_p9_special_emu; /* HMI P9 special emulation */
+ u32 hmi_irqs; /* HMI irq stat */
+#endif
+ u8 ftrace_enabled; /* Hard disable ftrace */
+
+ /* Stuff for accurate time accounting */
+ struct cpu_accounting_data accounting;
+ u64 dtl_ridx; /* read index in dispatch log */
+ struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ /* We use this to store guest state in */
+ struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+#endif
+ struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /*
+ * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+ * more details
+ */
+ struct sibling_subcore_state *sibling_subcore_state;
+#endif
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * rfi fallback flush must be in its own cacheline to prevent
+ * other paca data leaking into the L1d
+ */
+ u64 exrfi[EX_SIZE] __aligned(0x80);
+ void *rfi_flush_fallback_area;
+ u64 l1d_flush_size;
+#endif
+#ifdef CONFIG_PPC_PSERIES
+ u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ /* Capture SLB related old contents in MCE handler. */
+ struct slb_entry *mce_faulty_slbs;
+ u16 slb_save_cache_ptr;
+#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_STACKPROTECTOR
+ unsigned long canary;
+#endif
+#ifdef CONFIG_MMIOWB
+ struct mmiowb_state mmiowb_state;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct mce_info *mce_info;
+ u8 mce_pending_irq_work;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+} ____cacheline_aligned;
+
+extern void copy_mm_to_paca(struct mm_struct *mm);
+extern struct paca_struct **paca_ptrs;
+extern void initialise_paca(struct paca_struct *new_paca, int cpu);
+extern void setup_paca(struct paca_struct *new_paca);
+extern void allocate_paca_ptrs(void);
+extern void allocate_paca(int cpu);
+extern void free_unused_pacas(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void allocate_paca(int cpu) { }
+static inline void free_unused_pacas(void) { }
+
+#endif /* CONFIG_PPC64 */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PACA_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
new file mode 100644
index 000000000..edf1dd1b0
--- /dev/null
+++ b/arch/powerpc/include/asm/page.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PAGE_H
+#define _ASM_POWERPC_PAGE_H
+
+/*
+ * Copyright (C) 2001,2005 IBM Corporation.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/kernel.h>
+#else
+#include <asm/types.h>
+#endif
+#include <asm/asm-const.h>
+
+/*
+ * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
+ * on PPC44x and 4K/16K on 8xx). For PPC64 we support either a 4K or 64K software
+ * page size. When using 64K pages, however, whether we are really supporting
+ * 64K pages in HW or not is irrelevant to these definitions.
+ */
+#define PAGE_SHIFT CONFIG_PPC_PAGE_SHIFT
+#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+#ifndef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT PAGE_SHIFT
+#elif defined(CONFIG_PPC_BOOK3S_64)
+extern unsigned int hpage_shift;
+#define HPAGE_SHIFT hpage_shift
+#elif defined(CONFIG_PPC_8xx)
+#define HPAGE_SHIFT 19 /* 512k pages */
+#elif defined(CONFIG_PPC_E500)
+#define HPAGE_SHIFT 22 /* 4M pages */
+#endif
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
+#endif
+
+/*
+ * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
+ * assign PAGE_MASK to a larger type it gets extended the way we want
+ * (i.e. with 1s in the high bits)
+ */
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
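A small worked example of the sign extension this relies on, assuming PAGE_SHIFT == 12:

	int mask = ~((1 << 12) - 1);	/* 0xfffff000 as a 32-bit int */
	unsigned long wide = mask;	/* sign-extends to 0xfffffffffffff000 on 64-bit */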
+
+/*
+ * KERNELBASE is the virtual address of the start of the kernel, it's often
+ * the same as PAGE_OFFSET, but _might not be_.
+ *
+ * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
+ *
+ * PAGE_OFFSET is the virtual address of the start of lowmem.
+ *
+ * PHYSICAL_START is the physical address of the start of the kernel.
+ *
+ * MEMORY_START is the physical address of the start of lowmem.
+ *
+ * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
+ * ppc32 and based on how they are set we determine MEMORY_START.
+ *
+ * For the linear mapping the following equation should be true:
+ * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
+ *
+ * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
+ *
+ * There are two ways to determine a physical address from a virtual one:
+ * va = pa + PAGE_OFFSET - MEMORY_START
+ * va = pa + KERNELBASE - PHYSICAL_START
+ *
+ * If you want to know something's offset from the start of the kernel you
+ * should subtract KERNELBASE.
+ *
+ * If you want to test if something's a kernel address, use is_kernel_addr().
+ */
+
+#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
+#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
+#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
+
+#if defined(CONFIG_NONSTATIC_KERNEL)
+#ifndef __ASSEMBLY__
+
+extern phys_addr_t memstart_addr;
+extern phys_addr_t kernstart_addr;
+
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
+extern long long virt_phys_offset;
+#endif
+
+#endif /* __ASSEMBLY__ */
+#define PHYSICAL_START kernstart_addr
+
+#else /* !CONFIG_NONSTATIC_KERNEL */
+#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
+#endif
+
+/* See Description below for VIRT_PHYS_OFFSET */
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+#ifdef CONFIG_RELOCATABLE
+#define VIRT_PHYS_OFFSET virt_phys_offset
+#else
+#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
+#endif
+#endif
+
+#ifdef CONFIG_PPC64
+#define MEMORY_START 0UL
+#elif defined(CONFIG_NONSTATIC_KERNEL)
+#define MEMORY_START memstart_addr
+#else
+#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
+#ifndef __ASSEMBLY__
+extern unsigned long max_mapnr;
+static inline bool pfn_valid(unsigned long pfn)
+{
+ unsigned long min_pfn = ARCH_PFN_OFFSET;
+
+ return pfn >= min_pfn && pfn < max_mapnr;
+}
+#endif
+#endif
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
+ pfn_valid(virt_to_pfn(_addr)); \
+})
+
+/*
+ * On Book-E parts we need __va to parse the device tree and we can't
+ * determine MEMORY_START until then. However we can determine PHYSICAL_START
+ * from information at hand (program counter, TLB lookup).
+ *
+ * On BookE with RELOCATABLE && PPC32
+ *
+ * With RELOCATABLE && PPC32, we support loading the kernel at any physical
+ * address without any restriction on the page alignment.
+ *
+ * We find the runtime address of _stext and relocate ourselves based on
+ * the following calculation:
+ *
+ * virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
+ * MODULO(_stext.run,256M)
+ * and create the following mapping:
+ *
+ * ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
+ *
+ * When we process relocations, we cannot depend on the
+ * existing equation for the __va()/__pa() translations:
+ *
+ * __va(x) = (x) - PHYSICAL_START + KERNELBASE
+ *
+ * Where:
+ * PHYSICAL_START = kernstart_addr = Physical address of _stext
+ * KERNELBASE = Compiled virtual address of _stext.
+ *
+ * This formula holds true iff the kernel load address is TLB page aligned.
+ *
+ * In our case, we also need to account for the shift in the kernel virtual
+ * address.
+ *
+ * E.g.,
+ *
+ * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
+ * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
+ *
+ * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
+ *               = 0xbc100000, which is wrong.
+ *
+ * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
+ * according to our mapping.
+ *
+ * Hence we use the following formula to get the translations right:
+ *
+ * __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
+ *
+ * Where:
+ * PHYSICAL_START = dynamic load address (kernstart_addr variable)
+ * Effective KERNELBASE = virtual_base
+ *                      = ALIGN_DOWN(KERNELBASE,256M) +
+ *                        MODULO(PHYSICAL_START,256M)
+ *
+ * To make the cost of __va() / __pa() more lightweight, we introduce
+ * a new variable virt_phys_offset, which will hold:
+ *
+ * virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
+ *                  = ALIGN_DOWN(KERNELBASE,256M) -
+ *                    ALIGN_DOWN(PHYSICAL_START,256M)
+ *
+ * Hence :
+ *
+ * __va(x) = x - PHYSICAL_START + Effective KERNELBASE
+ * = x + virt_phys_offset
+ *
+ * and
+ * __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
+ * = x - virt_phys_offset
+ *
+ * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
+ * the other definitions for __va & __pa.
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
+#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
+#else
+#ifdef CONFIG_PPC64
+
+#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
+
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ * This also results in better code generation.
+ */
+#define __va(x) \
+({ \
+ VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
+ (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
+})
+
+#define __pa(x) \
+({ \
+ VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
+ (unsigned long)(x) & 0x0fffffffffffffffUL; \
+})
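+
+/*
+ * Example (illustrative, assuming the usual PAGE_OFFSET of
+ * 0xc000000000000000UL): linear-map virtual and physical addresses
+ * differ only in the top nibble, so the OR/AND forms above give
+ *
+ *   __va(0x2345000)          == (void *)0xc000000002345000
+ *   __pa(0xc000000002345000) == 0x2345000
+ */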
+
+#else /* 32-bit, non book E */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
+#endif
+#endif
+
+/*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
+
+#ifdef __powerpc64__
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif
+
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
+#elif defined(CONFIG_PPC_BOOK3S_64)
+#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+#else
+#define is_kernel_addr(x) ((x) >= TASK_SIZE)
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S_64
+/*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages. This works because we know that
+ * the page tables live in kernel space. If we ever decide to support having
+ * page tables at arbitrary addresses, this breaks and will have to change.
+ */
+#ifdef CONFIG_PPC64
+#define PD_HUGE 0x8000000000000000UL
+#else
+#define PD_HUGE 0x80000000
+#endif
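+
+/*
+ * Example (illustrative): a higher-level entry pd_val pointing at a
+ * hugepte directory is recognised by the top bit, and the pointer is
+ * recovered by putting that bit back (page tables live in kernel space,
+ * so kernel addresses always have it set):
+ *
+ *   int is_huge = (pd_val & PD_HUGE) != 0;
+ *   unsigned long ptr = (pd_val & ~HUGEPD_SHIFT_MASK) | PD_HUGE;
+ */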
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+/*
+ * Book3S 64 stores real addresses in the hugepd entries to
+ * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
+ */
+#define HUGEPD_ADDR_MASK (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+/*
+ * Some number of bits at the level of the page table that points to
+ * a hugepte are used to encode the size. This masks those bits.
+ * On 8xx, HW assistance requires 4k alignment for the hugepte.
+ */
+#ifdef CONFIG_PPC_8xx
+#define HUGEPD_SHIFT_MASK 0xfff
+#else
+#define HUGEPD_SHIFT_MASK 0x3f
+#endif
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/pgtable-be-types.h>
+#else
+#include <asm/pgtable-types.h>
+#endif
+
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *p);
+extern int devmem_is_allowed(unsigned long pfn);
+
+#ifdef CONFIG_PPC_SMLPAR
+void arch_free_page(struct page *page, int order);
+#define HAVE_ARCH_FREE_PAGE
+#endif
+
+struct vm_area_struct;
+
+extern unsigned long kernstart_virt_addr;
+
+static inline unsigned long kaslr_offset(void)
+{
+ return kernstart_virt_addr - KERNELBASE;
+}
+
+#include <asm-generic/memory_model.h>
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
new file mode 100644
index 000000000..56f217606
--- /dev/null
+++ b/arch/powerpc/include/asm/page_32.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PAGE_32_H
+#define _ASM_POWERPC_PAGE_32_H
+
+#include <asm/cache.h>
+
+#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
+#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
+#endif
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#endif
+
+#if defined(CONFIG_PPC_256K_PAGES) || \
+ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
+#else
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#else
+typedef unsigned long pte_basic_t;
+#endif
+
+#include <asm/bug.h>
+
+/*
+ * Clear page using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced). This only works on cacheable memory.
+ */
+static inline void clear_page(void *addr)
+{
+ unsigned int i;
+
+ WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
+
+ for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
+ dcbz(addr);
+}
+extern void copy_page(void *to, void *from);
+
+#include <asm-generic/getorder.h>
+
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
new file mode 100644
index 000000000..79a9b7c6a
--- /dev/null
+++ b/arch/powerpc/include/asm/page_64.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ */
+
+#include <asm/asm-const.h>
+
+/*
+ * We always define HW_PAGE_SHIFT to 12 as the use of 64K pages remains Linux
+ * specific: every notion of page number shared with the firmware, TCEs,
+ * iommu, etc. still uses a page size of 4K.
+ */
+#define HW_PAGE_SHIFT 12
+#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
+#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
+
+/*
+ * PAGE_FACTOR is the number of bits by which PAGE_SHIFT exceeds
+ * HW_PAGE_SHIFT, i.e. the shift relative to 4K hardware pages.
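+ *
+ * E.g. with 64K Linux pages PAGE_SHIFT is 16, so PAGE_FACTOR is 4 and
+ * one Linux page covers 1 << PAGE_FACTOR = 16 hardware 4K pages.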
+ */
+#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
+
+/* Segment size; normal 256M segments */
+#define SID_SHIFT 28
+#define SID_MASK ASM_CONST(0xfffffffff)
+#define ESID_MASK 0xfffffffff0000000UL
+#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
+
+/* 1T segments */
+#define SID_SHIFT_1T 40
+#define SID_MASK_1T 0xffffffUL
+#define ESID_MASK_1T 0xffffff0000000000UL
+#define GET_ESID_1T(x) (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
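+
+/*
+ * Example (illustrative): for ea = 0xc000000012345678,
+ * GET_ESID(ea) = 0xc00000001 and GET_ESID_1T(ea) = 0xc00000.
+ */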
+
+#ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static inline void clear_page(void *addr)
+{
+ unsigned long iterations;
+ unsigned long onex, twox, fourx, eightx;
+
+ iterations = ppc64_caches.l1d.blocks_per_page / 8;
+
+ /*
+ * Some versions of gcc use multiply instructions to
+ * calculate the offsets, so let's give it a hand to
+ * do better.
+ */
+ onex = ppc64_caches.l1d.block_size;
+ twox = onex << 1;
+ fourx = onex << 2;
+ eightx = onex << 3;
+
+ asm volatile(
+ "mtctr %1 # clear_page\n\
+ .balign 16\n\
+1: dcbz 0,%0\n\
+ dcbz %3,%0\n\
+ dcbz %4,%0\n\
+ dcbz %5,%0\n\
+ dcbz %6,%0\n\
+ dcbz %7,%0\n\
+ dcbz %8,%0\n\
+ dcbz %9,%0\n\
+ add %0,%0,%10\n\
+ bdnz+ 1b"
+ : "=&r" (addr)
+ : "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
+ "b" (twox+onex), "b" (fourx), "b" (fourx+onex),
+ "b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
+ : "ctr", "memory");
+}
+
+extern void copy_page(void *to, void *from);
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+
+/*
+ * This is the default if a program doesn't have a PT_GNU_STACK
+ * program header entry. The PPC64 ELF ABI has a non-executable stack
+ * by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC
+#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
+
+#define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
new file mode 100644
index 000000000..e08513d73
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PARAVIRT_H
+#define _ASM_POWERPC_PARAVIRT_H
+
+#include <linux/jump_label.h>
+#include <asm/smp.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/lppaca.h>
+#include <asm/hvcall.h>
+#endif
+
+#ifdef CONFIG_PPC_SPLPAR
+#include <linux/smp.h>
+#include <asm/kvm_guest.h>
+#include <asm/cputhreads.h>
+
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
+static inline bool is_shared_processor(void)
+{
+ return static_branch_unlikely(&shared_processor);
+}
+
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 pseries_paravirt_steal_clock(int cpu);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return pseries_paravirt_steal_clock(cpu);
+}
+#endif
+
+/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
+static inline u32 yield_count_of(int cpu)
+{
+ __be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
+ return be32_to_cpu(yield_count);
+}
+
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it cannot acquire the lock, and the unlock slow path might prod if a waiter
+ * has yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+}
+
+static inline void prod_cpu(int cpu)
+{
+ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
+}
+
+static inline void yield_to_any(void)
+{
+ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
+}
+#else
+static inline bool is_shared_processor(void)
+{
+ return false;
+}
+
+static inline u32 yield_count_of(int cpu)
+{
+ return 0;
+}
+
+extern void ___bad_yield_to_preempted(void);
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+ ___bad_yield_to_preempted(); /* This would be a bug */
+}
+
+extern void ___bad_yield_to_any(void);
+static inline void yield_to_any(void)
+{
+ ___bad_yield_to_any(); /* This would be a bug */
+}
+
+extern void ___bad_prod_cpu(void);
+static inline void prod_cpu(int cpu)
+{
+ ___bad_prod_cpu(); /* This would be a bug */
+}
+
+#endif
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ /*
+ * The dispatch/yield bit alone is an imperfect indicator of
+ * whether the hypervisor has dispatched @cpu to run on a physical
+ * processor. When it is clear, @cpu is definitely not preempted.
+ * But when it is set, it means only that it *might* be, subject to
+ * other conditions. So we check other properties of the VM and
+ * @cpu first, resorting to the yield count last.
+ */
+
+ /*
+ * Hypervisor preemption isn't possible in dedicated processor
+ * mode by definition.
+ */
+ if (!is_shared_processor())
+ return false;
+
+#ifdef CONFIG_PPC_SPLPAR
+ if (!is_kvm_guest()) {
+ int first_cpu;
+
+ /*
+ * The result of vcpu_is_preempted() is used in a
+ * speculative way, and is always subject to invalidation
+ * by events internal and external to Linux. While we can
+ * be called in preemptable context (in the Linux sense),
+ * we're not accessing per-cpu resources in a way that can
+ * race destructively with Linux scheduler preemption and
+ * migration, and callers can tolerate the potential for
+ * error introduced by sampling the CPU index without
+ * pinning the task to it. So it is permissible to use
+ * raw_smp_processor_id() here to defeat the preempt debug
+ * warnings that can arise from using smp_processor_id()
+ * in arbitrary contexts.
+ */
+ first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());
+
+ /*
+ * The PowerVM hypervisor dispatches VMs on a whole core
+ * basis. So we know that a thread sibling of the local CPU
+ * cannot have been preempted by the hypervisor, even if it
+ * has called H_CONFER, which will set the yield bit.
+ */
+ if (cpu_first_thread_sibling(cpu) == first_cpu)
+ return false;
+ }
+#endif
+
+ if (yield_count_of(cpu) & 1)
+ return true;
+ return false;
+}
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+ return !is_shared_processor();
+}
+
+#endif /* _ASM_POWERPC_PARAVIRT_H */
diff --git a/arch/powerpc/include/asm/paravirt_api_clock.h b/arch/powerpc/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000..d25ca7ac5
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt_api_clock.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/paravirt.h>
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
new file mode 100644
index 000000000..42cc321ed
--- /dev/null
+++ b/arch/powerpc/include/asm/parport.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_POWERPC_PARPORT_H
+#define _ASM_POWERPC_PARPORT_H
+#ifdef __KERNEL__
+
+#include <linux/of_irq.h>
+
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+ struct device_node *np;
+ const u32 *prop;
+ u32 io1, io2;
+ int propsize;
+ int count = 0;
+ int virq;
+
+ for_each_compatible_node(np, "parallel", "pnpPNP,400") {
+ prop = of_get_property(np, "reg", &propsize);
+ if (!prop || propsize > 6*sizeof(u32))
+ continue;
+ io1 = prop[1]; io2 = prop[2];
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (!virq)
+ continue;
+
+ if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+ != NULL)
+ count++;
+ }
+ return count;
+}
+
+#endif /* __KERNEL__ */
+#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h
new file mode 100644
index 000000000..712a0b321
--- /dev/null
+++ b/arch/powerpc/include/asm/pasemi_dma.h
@@ -0,0 +1,526 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2006-2008 PA Semi, Inc
+ *
+ * Hardware register layout and descriptor formats for the on-board
+ * DMA engine on PA Semi PWRficient. Used by ethernet, function and security
+ * drivers.
+ */
+
+#ifndef ASM_PASEMI_DMA_H
+#define ASM_PASEMI_DMA_H
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+ u64 rx_sta[64]; /* RX channel status */
+ u64 tx_sta[20]; /* TX channel status */
+};
+
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+enum {
+ PAS_DMA_CAP_TXCH = 0x44, /* Transmit Channel Info */
+ PAS_DMA_CAP_RXCH = 0x48, /* Receive Channel Info */
+ PAS_DMA_CAP_IFI = 0x4c, /* Interface Info */
+ PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
+ PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
+ PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
+ PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
+ PAS_DMA_COM_CFG = 0x114, /* Common config reg */
+ PAS_DMA_TXF_SFLG0 = 0x140, /* Set flags */
+ PAS_DMA_TXF_SFLG1 = 0x144, /* Set flags */
+ PAS_DMA_TXF_CFLG0 = 0x148, /* Clear flags */
+ PAS_DMA_TXF_CFLG1 = 0x14c, /* Clear flags */
+};
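+
+/*
+ * Example (illustrative, dma_pdev stands for whatever pci_dev the
+ * platform code located for the DMA engine):
+ *
+ *   u32 txch;
+ *
+ *   pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &txch);
+ *   nchan = (txch & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
+ */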
+
+
+#define PAS_DMA_CAP_TXCH_TCHN_M 0x00ff0000 /* # of TX channels */
+#define PAS_DMA_CAP_TXCH_TCHN_S 16
+
+#define PAS_DMA_CAP_RXCH_RCHN_M 0x00ff0000 /* # of RX channels */
+#define PAS_DMA_CAP_RXCH_RCHN_S 16
+
+#define PAS_DMA_CAP_IFI_IOFF_M 0xff000000 /* Cfg reg for intf pointers */
+#define PAS_DMA_CAP_IFI_IOFF_S 24
+#define PAS_DMA_CAP_IFI_NIN_M 0x00ff0000 /* # of interfaces */
+#define PAS_DMA_CAP_IFI_NIN_S 16
+
+#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
+
+
+/* Per-interface and per-channel registers */
+#define _PAS_DMA_RXINT_STRIDE 0x20
+#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001
+#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002
+#define PAS_DMA_RXINT_RCMDSTA_MBT 0x00000008
+#define PAS_DMA_RXINT_RCMDSTA_MDR 0x00000010
+#define PAS_DMA_RXINT_RCMDSTA_MOO 0x00000020
+#define PAS_DMA_RXINT_RCMDSTA_MBP 0x00000040
+#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800
+#define PAS_DMA_RXINT_RCMDSTA_DR 0x00001000
+#define PAS_DMA_RXINT_RCMDSTA_OO 0x00002000
+#define PAS_DMA_RXINT_RCMDSTA_BP 0x00004000
+#define PAS_DMA_RXINT_RCMDSTA_TB 0x00008000
+#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000
+#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
+#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
+#define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_CFG_RBP 0x80000000
+#define PAS_DMA_RXINT_CFG_ITRR 0x40000000
+#define PAS_DMA_RXINT_CFG_DHL_M 0x07000000
+#define PAS_DMA_RXINT_CFG_DHL_S 24
+#define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \
+ PAS_DMA_RXINT_CFG_DHL_M)
+#define PAS_DMA_RXINT_CFG_ITR 0x00400000
+#define PAS_DMA_RXINT_CFG_LW 0x00200000
+#define PAS_DMA_RXINT_CFG_L2 0x00100000
+#define PAS_DMA_RXINT_CFG_HEN 0x00080000
+#define PAS_DMA_RXINT_CFG_WIF 0x00000002
+#define PAS_DMA_RXINT_CFG_WIL 0x00000001
+
+#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff
+#define PAS_DMA_RXINT_INCR_INCR_S 0
+#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff)
+#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */
+#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+ PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
+#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
+#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
+#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
+#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
+#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
+#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
+#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
+#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
+#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
+#define PAS_DMA_TXCHAN_TCMDSTA_SZ 0x00000800
+#define PAS_DMA_TXCHAN_TCMDSTA_DB 0x00000400
+#define PAS_DMA_TXCHAN_TCMDSTA_DE 0x00000200
+#define PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100
+#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
+#define PAS_DMA_TXCHAN_CFG_TY_COPY 0x00000001 /* Type = copy only */
+#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = function */
+#define PAS_DMA_TXCHAN_CFG_TY_XOR 0x00000003 /* Type = xor only */
+#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
+#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
+#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+ PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define PAS_DMA_TXCHAN_CFG_LPDQ 0x00000800
+#define PAS_DMA_TXCHAN_CFG_LPSQ 0x00000400
+#define PAS_DMA_TXCHAN_CFG_WT_M 0x000003c0
+#define PAS_DMA_TXCHAN_CFG_WT_S 6
+#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+ PAS_DMA_TXCHAN_CFG_WT_M)
+#define PAS_DMA_TXCHAN_CFG_TRD 0x00010000 /* translate data */
+#define PAS_DMA_TXCHAN_CFG_TRR 0x00008000 /* translate rings */
+#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
+#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
+#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
+#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
+#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
+#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+ PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
+#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
+#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+ PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
+#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+ PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */
+#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */
+#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */
+#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */
+#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */
+#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */
+#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */
+#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */
+#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */
+#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000
+#define PAS_DMA_RXCHAN_CCMDSTA_OD 0x00002000
+#define PAS_DMA_RXCHAN_CCMDSTA_FD 0x00001000
+#define PAS_DMA_RXCHAN_CCMDSTA_DT 0x00000800
+#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_CFG_CTR 0x00000400
+#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380
+#define PAS_DMA_RXCHAN_CFG_HBU_S 7
+#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+ PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0
+#define PAS_DMA_RXCHAN_BASEL_BRBL_S 0
+#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+ PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff
+#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0
+#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+ PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000
+#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+ PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+#define PAS_STATUS_PCNT_M 0x000000000000ffffull
+#define PAS_STATUS_PCNT_S 0
+#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
+#define PAS_STATUS_DCNT_S 16
+#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
+#define PAS_STATUS_BPCNT_S 32
+#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
+#define PAS_STATUS_TIMER 0x1000000000000000ull
+#define PAS_STATUS_ERROR 0x2000000000000000ull
+#define PAS_STATUS_SOFT 0x4000000000000000ull
+#define PAS_STATUS_INT 0x8000000000000000ull
+
+#define PAS_IOB_COM_PKTHDRCNT 0x120
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M 0x0fff0000
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S 16
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M 0x00000fff
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S 0
+
+#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
+#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
+#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+ PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define XCT_MACTX_T 0x8000000000000000ull
+#define XCT_MACTX_ST 0x4000000000000000ull
+#define XCT_MACTX_NORES 0x0000000000000000ull
+#define XCT_MACTX_8BRES 0x1000000000000000ull
+#define XCT_MACTX_24BRES 0x2000000000000000ull
+#define XCT_MACTX_40BRES 0x3000000000000000ull
+#define XCT_MACTX_I 0x0800000000000000ull
+#define XCT_MACTX_O 0x0400000000000000ull
+#define XCT_MACTX_E 0x0200000000000000ull
+#define XCT_MACTX_VLAN_M 0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
+#define XCT_MACTX_CRC_M 0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
+#define XCT_MACTX_SS 0x0010000000000000ull
+#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S 32ull
+#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
+ XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M 0x00000000f8000000ull
+#define XCT_MACTX_IPH_S 27ull
+#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
+ XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M 0x0000000007c00000ull
+#define XCT_MACTX_IPO_S 22ull
+#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
+ XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M 0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
+#define XCT_MACTX_V6 0x0000000000000010ull
+#define XCT_MACTX_C 0x0000000000000004ull
+#define XCT_MACTX_AL2 0x0000000000000002ull
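+
+/*
+ * Example (illustrative): the first word of a transmit descriptor for
+ * a 64-byte frame with CRC insertion and TCP checksum offload might be
+ * composed as
+ *
+ *   u64 mactx = XCT_MACTX_ST | XCT_MACTX_CRC_INSERT |
+ *               XCT_MACTX_LLEN(64) | XCT_MACTX_CSUM_TCP;
+ */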
+
+/* Receive descriptor fields */
+#define XCT_MACRX_T 0x8000000000000000ull
+#define XCT_MACRX_ST 0x4000000000000000ull
+#define XCT_MACRX_RR_M 0x3000000000000000ull
+#define XCT_MACRX_RR_NORES 0x0000000000000000ull
+#define XCT_MACRX_RR_8BRES 0x1000000000000000ull
+#define XCT_MACRX_O 0x0400000000000000ull
+#define XCT_MACRX_E 0x0200000000000000ull
+#define XCT_MACRX_FF 0x0100000000000000ull
+#define XCT_MACRX_PF 0x0080000000000000ull
+#define XCT_MACRX_OB 0x0040000000000000ull
+#define XCT_MACRX_OD 0x0020000000000000ull
+#define XCT_MACRX_FS 0x0010000000000000ull
+#define XCT_MACRX_NB_M 0x000fc00000000000ull
+#define XCT_MACRX_NB_S 46ULL
+#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \
+ XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M 0x00003fff00000000ull
+#define XCT_MACRX_LLEN_S 32ULL
+#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \
+ XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC 0x0000000080000000ull
+#define XCT_MACRX_LEN_M 0x0000000060000000ull
+#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull
+#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull
+#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull
+#define XCT_MACRX_CAST_M 0x0000000018000000ull
+#define XCT_MACRX_CAST_UNI 0x0000000000000000ull
+#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull
+#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull
+#define XCT_MACRX_CAST_PAUSE 0x0000000018000000ull
+#define XCT_MACRX_VLC_M 0x0000000006000000ull
+#define XCT_MACRX_FM 0x0000000001000000ull
+#define XCT_MACRX_HTY_M 0x0000000000c00000ull
+#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull
+#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull
+#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull
+#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull
+#define XCT_MACRX_IPP_M 0x00000000003f0000ull
+#define XCT_MACRX_IPP_S 16
+#define XCT_MACRX_CSUM_M 0x000000000000ffffull
+#define XCT_MACRX_CSUM_S 0
+
+#define XCT_PTR_T 0x8000000000000000ull
+#define XCT_PTR_LEN_M 0x7ffff00000000000ull
+#define XCT_PTR_LEN_S 44
+#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
+ XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M 0x00000fffffffffffull
+#define XCT_PTR_ADDR_S 0
+#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
+ XCT_PTR_ADDR_M)
+
+/* Receive interface 8byte result fields */
+#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull
+#define XCT_RXRES_8B_L4O_S 56
+#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull
+#define XCT_RXRES_8B_RULE_S 40
+#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull
+#define XCT_RXRES_8B_EVAL_S 24
+#define XCT_RXRES_8B_HTYPE_M 0x0000000000f00000ull
+#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull
+#define XCT_RXRES_8B_HASH_S 0
+
+/* Receive interface buffer fields */
+#define XCT_RXB_LEN_M 0x0ffff00000000000ull
+#define XCT_RXB_LEN_S 44
+#define XCT_RXB_LEN(x) ((((long)(x)) << XCT_RXB_LEN_S) & \
+ XCT_RXB_LEN_M)
+#define XCT_RXB_ADDR_M 0x00000fffffffffffull
+#define XCT_RXB_ADDR_S 0
+#define XCT_RXB_ADDR(x) ((((long)(x)) << XCT_RXB_ADDR_S) & \
+ XCT_RXB_ADDR_M)
+
+/* Copy descriptor fields */
+#define XCT_COPY_T 0x8000000000000000ull
+#define XCT_COPY_ST 0x4000000000000000ull
+#define XCT_COPY_RR_M 0x3000000000000000ull
+#define XCT_COPY_RR_NORES 0x0000000000000000ull
+#define XCT_COPY_RR_8BRES 0x1000000000000000ull
+#define XCT_COPY_RR_24BRES 0x2000000000000000ull
+#define XCT_COPY_RR_40BRES 0x3000000000000000ull
+#define XCT_COPY_I 0x0800000000000000ull
+#define XCT_COPY_O 0x0400000000000000ull
+#define XCT_COPY_E 0x0200000000000000ull
+#define XCT_COPY_STY_ZERO 0x01c0000000000000ull
+#define XCT_COPY_DTY_PREF 0x0038000000000000ull
+#define XCT_COPY_LLEN_M 0x0007ffff00000000ull
+#define XCT_COPY_LLEN_S 32
+#define XCT_COPY_LLEN(x) ((((long)(x)) << XCT_COPY_LLEN_S) & \
+ XCT_COPY_LLEN_M)
+#define XCT_COPY_SE 0x0000000000000001ull
+
+/* Function descriptor fields */
+#define XCT_FUN_T 0x8000000000000000ull
+#define XCT_FUN_ST 0x4000000000000000ull
+#define XCT_FUN_RR_M 0x3000000000000000ull
+#define XCT_FUN_RR_NORES 0x0000000000000000ull
+#define XCT_FUN_RR_8BRES 0x1000000000000000ull
+#define XCT_FUN_RR_24BRES 0x2000000000000000ull
+#define XCT_FUN_RR_40BRES 0x3000000000000000ull
+#define XCT_FUN_I 0x0800000000000000ull
+#define XCT_FUN_O 0x0400000000000000ull
+#define XCT_FUN_E 0x0200000000000000ull
+#define XCT_FUN_FUN_M 0x01c0000000000000ull
+#define XCT_FUN_FUN_S 54
+#define XCT_FUN_FUN(x) ((((long)(x)) << XCT_FUN_FUN_S) & XCT_FUN_FUN_M)
+#define XCT_FUN_CRM_M 0x0038000000000000ull
+#define XCT_FUN_CRM_NOP 0x0000000000000000ull
+#define XCT_FUN_CRM_SIG 0x0008000000000000ull
+#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
+#define XCT_FUN_LLEN_S 32
+#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & XCT_FUN_LLEN_M)
+#define XCT_FUN_SHL_M 0x00000000f8000000ull
+#define XCT_FUN_SHL_S 27
+#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & XCT_FUN_SHL_M)
+#define XCT_FUN_CHL_M 0x0000000007c00000ull
+#define XCT_FUN_HSZ_M 0x00000000003c0000ull
+#define XCT_FUN_ALG_M 0x0000000000038000ull
+#define XCT_FUN_HP 0x0000000000004000ull
+#define XCT_FUN_BCM_M 0x0000000000003800ull
+#define XCT_FUN_BCP_M 0x0000000000000600ull
+#define XCT_FUN_SIG_M 0x00000000000001f0ull
+#define XCT_FUN_SIG_TCP4 0x0000000000000140ull
+#define XCT_FUN_SIG_TCP6 0x0000000000000150ull
+#define XCT_FUN_SIG_UDP4 0x0000000000000160ull
+#define XCT_FUN_SIG_UDP6 0x0000000000000170ull
+#define XCT_FUN_A 0x0000000000000008ull
+#define XCT_FUN_C 0x0000000000000004ull
+#define XCT_FUN_AL2 0x0000000000000002ull
+#define XCT_FUN_SE 0x0000000000000001ull
+
+/* Function descriptor 8byte result fields */
+#define XCT_FUNRES_8B_CS_M 0x0000ffff00000000ull
+#define XCT_FUNRES_8B_CS_S 32
+#define XCT_FUNRES_8B_CRC_M 0x00000000ffffffffull
+#define XCT_FUNRES_8B_CRC_S 0
+
+/* Control descriptor fields */
+#define CTRL_CMD_T 0x8000000000000000ull
+#define CTRL_CMD_META_EVT 0x2000000000000000ull
+#define CTRL_CMD_O 0x0400000000000000ull
+#define CTRL_CMD_ETYPE_M 0x0038000000000000ull
+#define CTRL_CMD_ETYPE_EXT 0x0000000000000000ull
+#define CTRL_CMD_ETYPE_WSET 0x0020000000000000ull
+#define CTRL_CMD_ETYPE_WCLR 0x0028000000000000ull
+#define CTRL_CMD_ETYPE_SET 0x0030000000000000ull
+#define CTRL_CMD_ETYPE_CLR 0x0038000000000000ull
+#define CTRL_CMD_REG_M 0x000000000000007full
+#define CTRL_CMD_REG_S 0
+#define CTRL_CMD_REG(x) ((((long)(x)) << CTRL_CMD_REG_S) & \
+ CTRL_CMD_REG_M)
+
+
+
+/* Prototypes for the shared DMA functions in the platform code. */
+
+/* DMA TX channel type. Right now the only distinction used is the event
+ * class (0/1) for event-triggered DMA transactions.
+ */
+
+enum pasemi_dmachan_type {
+ RXCHAN = 0, /* Any RX chan */
+ TXCHAN = 1, /* Any TX chan */
+ TXCHAN_EVT0 = 0x1001, /* TX chan in event class 0 (chan 0-9) */
+ TXCHAN_EVT1 = 0x2001, /* TX chan in event class 1 (chan 10-19) */
+};
+
+struct pasemi_dmachan {
+ int chno; /* Channel number */
+ enum pasemi_dmachan_type chan_type; /* TX / RX */
+ u64 *status; /* Ptr to cacheable status */
+ int irq; /* IRQ used by channel */
+ unsigned int ring_size; /* size of allocated ring */
+ dma_addr_t ring_dma; /* DMA address for ring */
+ u64 *ring_virt; /* Virt address for ring */
+ void *priv; /* Ptr to start of client struct */
+};
+
+/* Read/write the different registers in the I/O Bridge, Ethernet
+ * and DMA Controller
+ */
+extern unsigned int pasemi_read_iob_reg(unsigned int reg);
+extern void pasemi_write_iob_reg(unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_mac_reg(int intf, unsigned int reg);
+extern void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_dma_reg(unsigned int reg);
+extern void pasemi_write_dma_reg(unsigned int reg, unsigned int val);
+
+/* Channel management routines */
+
+extern void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
+ int total_size, int offset);
+extern void pasemi_dma_free_chan(struct pasemi_dmachan *chan);
+
+extern void pasemi_dma_start_chan(const struct pasemi_dmachan *chan,
+ const u32 cmdsta);
+extern int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan);
+
+/* Common routines to allocate rings and buffers */
+
+extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size);
+extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan);
+
+extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
+ dma_addr_t *handle);
+extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
+ dma_addr_t *handle);
+
+/* Routines to allocate flags (events) for channel synchronization */
+extern int pasemi_dma_alloc_flag(void);
+extern void pasemi_dma_free_flag(int flag);
+extern void pasemi_dma_set_flag(int flag);
+extern void pasemi_dma_clear_flag(int flag);
+
+/* Routines to allocate function engines */
+extern int pasemi_dma_alloc_fun(void);
+extern void pasemi_dma_free_fun(int fun);
+
+/* Initialize the library, must be called before any other functions */
+extern int pasemi_dma_init(void);
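+
+/*
+ * Typical call sequence (illustrative sketch, error handling omitted;
+ * assumes the pasemi_dmachan sits at offset 0 of the client struct):
+ *
+ *   struct pasemi_dmachan *chan;
+ *
+ *   pasemi_dma_init();
+ *   chan = pasemi_dma_alloc_chan(TXCHAN, sizeof(*chan), 0);
+ *   pasemi_dma_alloc_ring(chan, 64);
+ *   pasemi_dma_start_chan(chan, 0);
+ */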
+
+#endif /* ASM_PASEMI_DMA_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
new file mode 100644
index 000000000..e18c95f4e
--- /dev/null
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+#ifdef __KERNEL__
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/numa.h>
+
+struct device_node;
+
+/*
+ * PCI controller operations
+ */
+struct pci_controller_ops {
+ void (*dma_dev_setup)(struct pci_dev *pdev);
+ void (*dma_bus_setup)(struct pci_bus *bus);
+ bool (*iommu_bypass_supported)(struct pci_dev *pdev,
+ u64 mask);
+
+ int (*probe_mode)(struct pci_bus *bus);
+
+ /* Called when pci_enable_device() is called. Returns true to
+ * allow assignment/enabling of the device. */
+ bool (*enable_device_hook)(struct pci_dev *pdev);
+
+ void (*disable_device)(struct pci_dev *pdev);
+
+ void (*release_device)(struct pci_dev *pdev);
+
+ /* Called during PCI resource reassignment */
+ resource_size_t (*window_alignment)(struct pci_bus *bus,
+ unsigned long type);
+ void (*setup_bridge)(struct pci_bus *bus,
+ unsigned long type);
+ void (*reset_secondary_bus)(struct pci_dev *pdev);
+
+#ifdef CONFIG_PCI_MSI
+ int (*setup_msi_irqs)(struct pci_dev *pdev,
+ int nvec, int type);
+ void (*teardown_msi_irqs)(struct pci_dev *pdev);
+#endif
+
+ void (*shutdown)(struct pci_controller *hose);
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ struct pci_bus *bus;
+ char is_dynamic;
+#ifdef CONFIG_PPC64
+ int node;
+#endif
+ struct device_node *dn;
+ struct list_head list_node;
+ struct device *parent;
+
+ int first_busno;
+ int last_busno;
+ int self_busno;
+ struct resource busn;
+
+ void __iomem *io_base_virt;
+#ifdef CONFIG_PPC64
+ void __iomem *io_base_alloc;
+#endif
+ resource_size_t io_base_phys;
+ resource_size_t pci_io_size;
+
+ /* Some machines have a special region to forward the ISA
+ * "memory" cycles such as VGA memory regions. Left to 0
+ * if unsupported
+ */
+ resource_size_t isa_mem_phys;
+ resource_size_t isa_mem_size;
+
+ struct pci_controller_ops controller_ops;
+ struct pci_ops *ops;
+ unsigned int __iomem *cfg_addr;
+ void __iomem *cfg_data;
+
+ /*
+ * Used for variants of PCI indirect handling and possible quirks:
+ * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+ * EXT_REG - provides access to PCI-e extended registers
+ * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
+ * on Freescale PCI-e controllers since they use the PCI_PRIMARY_BUS
+ * to determine which bus number to match on when generating type0
+ * config cycles
+ * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+ * hanging if we don't have link and try to do config cycles to
+ * anything but the PHB. Only allow talking to the PHB if this is
+ * set.
+ * BIG_ENDIAN - cfg_addr is a big endian register
+ * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs on
+ * the PLB4. Effectively disable MRM commands by setting this.
+ * FSL_CFG_REG_LINK - Freescale controller version in which the PCIe
+ * link status is in a RC PCIe cfg register (vs being a SoC register)
+ */
+#define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
+#define PPC_INDIRECT_TYPE_EXT_REG 0x00000002
+#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004
+#define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
+#define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010
+#define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020
+#define PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK 0x00000040
+ u32 indirect_type;
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ resource_size_t mem_offset[3];
+ int global_number; /* PCI domain number */
+
+ resource_size_t dma_window_base_cur;
+ resource_size_t dma_window_size;
+
+#ifdef CONFIG_PPC64
+ unsigned long buid;
+ struct pci_dn *pci_data;
+#endif /* CONFIG_PPC64 */
+
+ void *private_data;
+
+ /* IRQ domain hierarchy */
+ struct irq_domain *dev_domain;
+ struct irq_domain *msi_domain;
+ struct fwnode_handle *fwnode;
+};
+
+/* These are used for config access before all the PCI probing
+ has been done. */
+extern int early_read_config_byte(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u8 *val);
+extern int early_read_config_word(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u16 *val);
+extern int early_read_config_dword(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u32 *val);
+extern int early_write_config_byte(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u8 val);
+extern int early_write_config_word(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u16 val);
+extern int early_write_config_dword(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u32 val);
+
+extern int early_find_capability(struct pci_controller *hose, int bus,
+ int dev_fn, int cap);
+
+extern void setup_indirect_pci(struct pci_controller* hose,
+ resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags);
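+
+/*
+ * Example (illustrative): a platform with a big-endian indirect PHB
+ * might wire it up as
+ *
+ *   setup_indirect_pci(hose, rsrc.start, rsrc.start + 4,
+ *                      PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ *
+ * where rsrc is the config-space resource taken from the device tree.
+ */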
+
+extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val);
+
+extern int __indirect_read_config(struct pci_controller *hose,
+ unsigned char bus_number, unsigned int devfn,
+ int offset, int len, u32 *val);
+
+extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val);
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+ return bus->sysdata;
+}
+
+#ifdef CONFIG_PPC_PMAC
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8 *bus, u8 *devfn);
+#endif
+#ifndef CONFIG_PPC64
+
+#ifdef CONFIG_PPC_CHRP
+extern void pci_create_OF_bus_map(void);
+#endif
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * PCI stuff, for nodes representing PCI devices, pointed to
+ * by device_node->data.
+ */
+struct iommu_table;
+
+struct pci_dn {
+ int flags;
+#define PCI_DN_FLAG_IOV_VF 0x01
+#define PCI_DN_FLAG_DEAD 0x02 /* Device has been hot-removed */
+
+ int busno; /* pci bus number */
+ int devfn; /* pci device and function number */
+ int vendor_id; /* Vendor ID */
+ int device_id; /* Device ID */
+ int class_code; /* Device class code */
+
+ struct pci_dn *parent;
+ struct pci_controller *phb; /* for pci devices */
+ struct iommu_table_group *table_group; /* for phb's or bridges */
+
+ int pci_ext_config_space; /* for pci devices */
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev; /* eeh device */
+#endif
+#define IODA_INVALID_PE 0xFFFFFFFF
+ unsigned int pe_number;
+#ifdef CONFIG_PCI_IOV
+ u16 vfs_expanded; /* number of VFs IOV BAR expanded */
+ u16 num_vfs; /* number of VFs enabled */
+ unsigned int *pe_num_map; /* PE# for the first VF PE or array */
+ bool m64_single_mode; /* Use M64 BAR in Single Mode */
+#define IODA_INVALID_M64 (-1)
+ int (*m64_map)[PCI_SRIOV_NUM_BARS]; /* Only used on powernv */
+ int last_allow_rc; /* Only used on pseries */
+#endif /* CONFIG_PCI_IOV */
+ int mps; /* Maximum Payload Size */
+ struct list_head child_list;
+ struct list_head list;
+ struct resource holes[PCI_SRIOV_NUM_BARS];
+};
+
+/* Get the pointer to a device_node's pci_dn */
+#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+
+extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn);
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+extern struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
+ struct device_node *dn);
+extern void pci_remove_device_node_info(struct device_node *dn);
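+
+/*
+ * Example (illustrative):
+ *
+ *   struct pci_dn *pdn = pci_get_pdn(pdev);
+ *
+ *   if (pdn && pdn->phb)
+ *       pr_debug("device %04x:%04x on PHB buid %lx\n",
+ *                pdn->vendor_id, pdn->device_id, pdn->phb->buid);
+ */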
+
+#ifdef CONFIG_PCI_IOV
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
+void remove_sriov_vf_pdns(struct pci_dev *pdev);
+#endif
+
+#if defined(CONFIG_EEH)
+static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
+{
+ return pdn ? pdn->edev : NULL;
+}
+#else
+#define pdn_to_eeh_dev(x) (NULL)
+#endif
+
+/** Find the bus corresponding to the indicated device node */
+extern struct pci_bus *pci_find_bus_by_node(struct device_node *dn);
+
+/** Remove all of the PCI devices under this bus */
+extern void pci_hp_remove_devices(struct pci_bus *bus);
+
+/** Discover new pci devices under this bus, and add them */
+extern void pci_hp_add_devices(struct pci_bus *bus);
+
+extern int pcibios_unmap_io_space(struct pci_bus *bus);
+extern int pcibios_map_io_space(struct pci_bus *bus);
+
+#ifdef CONFIG_NUMA
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
+#else
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE)
+#endif
+
+#endif /* CONFIG_PPC64 */
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller *pci_find_hose_for_OF_device(
+ struct device_node* node);
+
+extern struct pci_controller *pci_find_controller_for_domain(int domain_nr);
+
+/* Fill up host controller resources from the OF node */
+extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+ struct device_node *dev, int primary);
+
+/* Allocate & free a PCI host bridge structure */
+extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
+
+#ifdef CONFIG_PCI
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
+#else
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PCI_BRIDGE_H */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
new file mode 100644
index 000000000..289f1ec85
--- /dev/null
+++ b/arch/powerpc/include/asm/pci.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dma-map-ops.h>
+#include <linux/scatterlist.h>
+
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+
+/* Return values for pci_controller_ops.probe_mode function */
+#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers (don't do that on ppc64 yet !)
+ */
+#define pcibios_assign_all_busses() \
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ if (ppc_md.pci_get_legacy_ide_irq)
+ return ppc_md.pci_get_legacy_ide_irq(dev, channel);
+ return channel ? 15 : 14;
+}
+
+#ifdef CONFIG_PCI
+void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+#else /* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#endif
+
+#ifdef CONFIG_PPC64
+
+/*
+ * We want to avoid touching the cacheline size or MWI bit.
+ * pSeries firmware sets the cacheline size (which is not the cpu cacheline
+ * size in all cases) and hardware treats MWI the same as memory write.
+ */
+#define PCI_DISABLE_MWI
+
+#endif /* CONFIG_PPC64 */
+
+extern int pci_domain_nr(struct pci_bus *bus);
+
+/* Decide whether to display the domain number in /proc */
+extern int pci_proc_domain(struct pci_bus *bus);
+
+struct vm_area_struct;
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+#define arch_can_pci_mmap_wc() 1
+
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+ size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+ size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state);
+
+#define HAVE_PCI_LEGACY 1
+
+extern void pcibios_claim_one_bus(struct pci_bus *b);
+
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
+
+extern void pcibios_resource_survey(void);
+
+extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);
+
+extern struct pci_dev *of_create_pci_dev(struct device_node *node,
+ struct pci_bus *bus, int devfn);
+
+extern unsigned int pci_parse_of_flags(u32 addr0, int bridge);
+
+extern void of_scan_pci_bridge(struct pci_dev *dev);
+
+extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
+
+struct file;
+extern pgprot_t pci_phys_mem_access_prot(struct file *file,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot);
+
+extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);
+extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
+extern void pcibios_scan_phb(struct pci_controller *hose);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
new file mode 100644
index 000000000..8e5b7d0b8
--- /dev/null
+++ b/arch/powerpc/include/asm/percpu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PERCPU_H_
+#define _ASM_POWERPC_PERCPU_H_
+#ifdef __powerpc64__
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the paca. Based on the x86-64 implementation.
+ */
+
+#ifdef CONFIG_SMP
+
+#define __my_cpu_offset local_paca->data_offset
+
+#endif /* CONFIG_SMP */
+#endif /* __powerpc64__ */
+
+#include <asm-generic/percpu.h>
+
+#include <asm/paca.h>
+
+#endif /* _ASM_POWERPC_PERCPU_H_ */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
new file mode 100644
index 000000000..164e910bf
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Performance event support - hardware-specific disambiguation
+ *
+ * For now this is a compile-time decision, but eventually it should be
+ * runtime. This would allow multiplatform perf event support for e300 (fsl
+ * embedded perf counters) plus server/classic, and would accommodate
+ * devices other than the core which provide their own performance counters.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ */
+
+#ifdef CONFIG_PPC_PERF_CTRS
+#include <asm/perf_event_server.h>
+#else
+static inline bool is_sier_available(void) { return false; }
+static inline unsigned long get_pmcs_ext_regs(int idx) { return 0; }
+#endif
+
+#ifdef CONFIG_FSL_EMB_PERF_EVENT
+#include <asm/perf_event_fsl_emb.h>
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+
+/*
+ * Overload regs->result to specify whether we should use the MSR (result
+ * is zero) or the SIAR (result is non-zero).
+ */
+#define perf_arch_fetch_caller_regs(regs, __ip) \
+ do { \
+ (regs)->result = 0; \
+ (regs)->nip = __ip; \
+ (regs)->gpr[1] = current_stack_frame(); \
+ asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
+ } while (0)
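+
+/*
+ * Illustrative sketch (not part of the upstream header): a consumer of
+ * the regs->result convention can pick the sample IP roughly like
+ * this; the real logic lives in the powerpc perf core, and SIAR is
+ * only meaningful where the PMU provides it:
+ *
+ *	if (regs->result)
+ *		ip = mfspr(SPRN_SIAR);
+ *	else
+ *		ip = regs->nip;
+ */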
+
+/* To support perf_regs sier update */
+extern bool is_sier_available(void);
+extern unsigned long get_pmcs_ext_regs(int idx);
+/* To define perf extended regs mask value */
+extern u64 PERF_REG_EXTENDED_MASK;
+#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
+#endif
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
new file mode 100644
index 000000000..c4d9ceb03
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Performance event support - Freescale embedded specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+
+#define MAX_HWEVENTS 6
+
+/* event flags */
+#define FSL_EMB_EVENT_VALID 1
+#define FSL_EMB_EVENT_RESTRICTED 2
+
+/* upper half of event flags is PMLCb */
+#define FSL_EMB_EVENT_THRESHMUL 0x0000070000000000ULL
+#define FSL_EMB_EVENT_THRESH 0x0000003f00000000ULL
+
+struct fsl_emb_pmu {
+ const char *name;
+ int n_counter; /* total number of counters */
+
+ /*
+ * The number of contiguous counters starting at zero that
+ * can hold restricted events, or zero if there are no
+ * restricted events.
+ *
+ * This isn't a very flexible method of expressing constraints,
+ * but it's very simple and is adequate for existing chips.
+ */
+ int n_restricted;
+
+ /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
+ u64 (*xlate_event)(u64 event_id);
+
+ int n_generic;
+ int *generic_events;
+ int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+int register_fsl_emb_pmu(struct fsl_emb_pmu *);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
new file mode 100644
index 000000000..e2221d29f
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Performance event support - PowerPC classic/server specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
+
+/* Update perf_event_print_debug() if this changes */
+#define MAX_HWEVENTS 8
+#define MAX_EVENT_ALTERNATIVES 8
+#define MAX_LIMITED_HWCOUNTERS 2
+
+struct perf_event;
+
+struct mmcr_regs {
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcr2;
+ unsigned long mmcra;
+ unsigned long mmcr3;
+};
+/*
+ * This struct provides the constants and functions needed to
+ * describe the PMU on a particular POWER-family CPU.
+ */
+struct power_pmu {
+ const char *name;
+ int n_counter;
+ int max_alternatives;
+ unsigned long add_fields;
+ unsigned long test_adder;
+ int (*compute_mmcr)(u64 events[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags);
+ int (*get_constraint)(u64 event_id, unsigned long *mskp,
+ unsigned long *valp, u64 event_config1);
+ int (*get_alternatives)(u64 event_id, unsigned int flags,
+ u64 alt[]);
+ void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
+ u32 flags, struct pt_regs *regs);
+ void (*get_mem_weight)(u64 *weight, u64 type);
+ unsigned long group_constraint_mask;
+ unsigned long group_constraint_val;
+ u64 (*bhrb_filter_map)(u64 branch_sample_type);
+ void (*config_bhrb)(u64 pmu_bhrb_filter);
+ void (*disable_pmc)(unsigned int pmc, struct mmcr_regs *mmcr);
+ int (*limited_pmc_event)(u64 event_id);
+ u32 flags;
+ const struct attribute_group **attr_groups;
+ int n_generic;
+ int *generic_events;
+ u64 (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+ int n_blacklist_ev;
+ int *blacklist_ev;
+ /* BHRB entries in the PMU */
+ int bhrb_nr;
+ /*
+ * Set `PERF_PMU_CAP_EXTENDED_REGS` in this field if
+ * the PMU supports the extended perf regs capability.
+ */
+ int capabilities;
+ /*
+ * Function to check the event code for reserved values. It takes a
+ * struct perf_event as input, since the event code could be spread
+ * across attr.config*.
+ */
+ int (*check_attr_config)(struct perf_event *ev);
+};
+
+/*
+ * Values for power_pmu.flags
+ */
+#define PPMU_LIMITED_PMC5_6 0x00000001 /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR 0x00000002 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
+#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
+#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
+#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
+#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
+#define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */
+#define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */
+#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */
+
+/*
+ * Values for flags to get_alternatives()
+ */
+#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */
+#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
+#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
+
+int __init register_power_pmu(struct power_pmu *pmu);
+
+struct pt_regs;
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long int read_bhrb(int n);
+
+/*
+ * Only override the default definitions in include/linux/perf_event.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
+/*
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event_id and
+ * other events.
+ *
+ * The value and mask are divided up into (non-overlapping) bitfields
+ * of three different types:
+ *
+ * Select field: this expresses the constraint that some set of bits
+ * in MMCR* needs to be set to a specific value for this event_id. For a
+ * select field, the mask contains 1s in every bit of the field, and
+ * the value contains a unique value for each possible setting of the
+ * MMCR* bits. The constraint checking code will ensure that two events
+ * that set the same field in their masks have the same value in their
+ * value dwords.
+ *
+ * Add field: this expresses the constraint that there can be at most
+ * N events in a particular class. A field of k bits can be used for
+ * N <= 2^(k-1) - 1. The mask has the most significant bit of the field
+ * set (and the other bits 0), and the value has only the least significant
+ * bit of the field set. In addition, the 'add_fields' and 'test_adder'
+ * in the struct power_pmu for this processor come into play. The
+ * add_fields value contains 1 in the LSB of the field, and the
+ * test_adder contains 2^(k-1) - 1 - N in the field.
+ *
+ * NAND field: this expresses the constraint that you may not have events
+ * in all of a set of classes. (For example, on PPC970, you can't select
+ * events from the FPU, ISU and IDU simultaneously, although any two are
+ * possible.) For N classes, the field is N+1 bits wide, and each class
+ * is assigned one bit from the least-significant N bits. The mask has
+ * only the most-significant bit set, and the value has only the bit
+ * for the event_id's class set. The test_adder has the least significant
+ * bit set in the field.
+ *
+ * If an event_id is not subject to the constraint expressed by a particular
+ * field, then it will have 0 in both the mask and value for that field.
+ */
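+
+/*
+ * Illustrative sketch (not part of the upstream header): for select
+ * fields, two events are compatible when every field set in both masks
+ * carries the same value. The full checker, which also handles add and
+ * NAND fields via add_fields/test_adder, lives in
+ * arch/powerpc/perf/core-book3s.c; this helper is a simplified model.
+ */
+static inline bool example_select_fields_agree(unsigned long val1,
+ unsigned long mask1,
+ unsigned long val2,
+ unsigned long mask2)
+{
+ /* every select field set in both masks must carry the same value */
+ return ((val1 ^ val2) & mask1 & mask2) == 0;
+}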
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC' where the latter is the name by which the event is known in
+ * POWER CPU specification.
+ *
+ * Similarly, some hardware and cache events use the same event code. E.g.
+ * on POWER8, both "cache-references" and "L1-dcache-loads" events refer
+ * to the same event, PM_LD_REF_L1. The suffix allows us to have two
+ * sysfs objects for the same event and thus two entries/aliases in sysfs.
+ */
+#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
+#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
+
+#define EVENT_ATTR(_name, _id, _suffix) \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id, \
+ power_events_sysfs_show)
+
+#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
+#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
+
+#define CACHE_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _c)
+#define CACHE_EVENT_PTR(_id) EVENT_PTR(_id, _c)
+
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
+#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
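+
+/*
+ * Illustrative usage sketch (hypothetical event code, not defined
+ * here): a PMU driver can expose the same event code under a generic
+ * name and its POWER-specific name without the sysfs variable names
+ * colliding, e.g.
+ *
+ *	GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+ *	POWER_EVENT_ATTR(PM_CYC, PM_CYC);
+ */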
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
new file mode 100644
index 000000000..3360cad78
--- /dev/null
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+
+#include <linux/mm.h>
+
+#ifndef MODULE
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+ if (unlikely(mm == &init_mm))
+ return gfp;
+ return gfp | __GFP_ACCOUNT;
+}
+#else /* !MODULE */
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+ return gfp | __GFP_ACCOUNT;
+}
+#endif /* MODULE */
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return (pte_t *)pte_fragment_alloc(mm, 1);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return (pgtable_t)pte_fragment_alloc(mm, 0);
+}
+
+void pte_frag_destroy(void *pte_frag);
+void pte_fragment_free(unsigned long *table, int kernel);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ pte_fragment_free((unsigned long *)pte, 1);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pte_fragment_free((unsigned long *)ptepage, 0);
+}
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages, the allocation size will be
+ * (2^index_size * sizeof(pointer)) and allocations are drawn from
+ * the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) pgtable_cache[shift]
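+
+/*
+ * Illustrative sketch (hypothetical helpers, not part of this header):
+ * because even the smallest page tables are aligned enough to leave
+ * the low bits of their pointers clear, an index size can be packed
+ * into those bits and masked back out again.
+ */
+static inline unsigned long example_pack_pgtable(void *table,
+ unsigned int index_size)
+{
+ /* index_size must not exceed MAX_PGTABLE_INDEX_SIZE */
+ return (unsigned long)table | index_size;
+}
+
+static inline unsigned int example_unpack_index_size(unsigned long packed)
+{
+ return packed & MAX_PGTABLE_INDEX_SIZE;
+}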
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgalloc.h>
+#else
+#include <asm/nohash/pgalloc.h>
+#endif
+
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
new file mode 100644
index 000000000..82633200b
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_BE_TYPES_H
+#define _ASM_POWERPC_PGTABLE_BE_TYPES_H
+
+#include <asm/cmpxchg.h>
+
+/* PTE level */
+typedef struct { __be64 pte; } pte_t;
+#define __pte(x) ((pte_t) { cpu_to_be64(x) })
+#define __pte_raw(x) ((pte_t) { (x) })
+static inline unsigned long pte_val(pte_t x)
+{
+ return be64_to_cpu(x.pte);
+}
+
+static inline __be64 pte_raw(pte_t x)
+{
+ return x.pte;
+}
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { __be64 pmd; } pmd_t;
+#define __pmd(x) ((pmd_t) { cpu_to_be64(x) })
+#define __pmd_raw(x) ((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+ return be64_to_cpu(x.pmd);
+}
+
+static inline __be64 pmd_raw(pmd_t x)
+{
+ return x.pmd;
+}
+
+/* 64-bit always uses a 4-level table. */
+typedef struct { __be64 pud; } pud_t;
+#define __pud(x) ((pud_t) { cpu_to_be64(x) })
+#define __pud_raw(x) ((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+ return be64_to_cpu(x.pud);
+}
+
+static inline __be64 pud_raw(pud_t x)
+{
+ return x.pud;
+}
+
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { __be64 pgd; } pgd_t;
+#define __pgd(x) ((pgd_t) { cpu_to_be64(x) })
+#define __pgd_raw(x) ((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+ return be64_to_cpu(x.pgd);
+}
+
+static inline __be64 pgd_raw(pgd_t x)
+{
+ return x.pgd;
+}
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/*
+ * With the hash MMU and 64k pages, additionally define a bigger "real PTE"
+ * type that gathers the "second half" of the PTE for pseudo 64k pages.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+{
+ unsigned long *p = (unsigned long *)ptep;
+ __be64 prev;
+
+ /* See comment in switch_mm_irqs_off() */
+ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+ (__force unsigned long)pte_raw(new));
+
+ return pte_raw(old) == prev;
+}
+
+static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
+{
+ unsigned long *p = (unsigned long *)pmdp;
+ __be64 prev;
+
+ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pmd_raw(old),
+ (__force unsigned long)pmd_raw(new));
+
+ return pmd_raw(old) == prev;
+}
+
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return be64_to_cpu(x.pdbe);
+}
+#endif
+
+#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
new file mode 100644
index 000000000..082c85cc0
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_TYPES_H
+#define _ASM_POWERPC_PGTABLE_TYPES_H
+
+#if defined(__CHECKER__) || !defined(CONFIG_PPC32)
+#define STRICT_MM_TYPECHECKS
+#endif
+
+/* PTE level */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
+#elif defined(STRICT_MM_TYPECHECKS)
+typedef struct { pte_basic_t pte; } pte_t;
+#else
+typedef pte_basic_t pte_t;
+#endif
+
+#if defined(STRICT_MM_TYPECHECKS) || \
+ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
+#define __pte(x) ((pte_t) { (x) })
+static inline pte_basic_t pte_val(pte_t x)
+{
+ return x.pte;
+}
+#else
+#define __pte(x) ((pte_t)(x))
+static inline pte_basic_t pte_val(pte_t x)
+{
+ return x;
+}
+#endif
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { unsigned long pmd; } pmd_t;
+#define __pmd(x) ((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+ return x.pmd;
+}
+
+/* 64-bit always uses a 4-level table. */
+typedef struct { unsigned long pud; } pud_t;
+#define __pud(x) ((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+ return x.pud;
+}
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define __pgd(x) ((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+ return x.pgd;
+}
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/*
+ * With the hash MMU and 64k pages, additionally define a bigger "real PTE"
+ * type that gathers the "second half" of the PTE for pseudo 64k pages.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/cmpxchg.h>
+
+static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+{
+ unsigned long *p = (unsigned long *)ptep;
+
+ /* See comment in switch_mm_irqs_off() */
+ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+}
+#endif
+
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return x.pd;
+}
+#endif
+
+#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
new file mode 100644
index 000000000..283f40d05
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+struct mm_struct;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgtable.h>
+#else
+#include <asm/nohash/pgtable.h>
+#endif /* !CONFIG_PPC_BOOK3S */
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+#ifndef __ASSEMBLY__
+
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
+
+/* Keep these as macros to avoid include dependency mess */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags;
+
+ pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
+ return __pgprot(pte_flags);
+}
+
+#ifndef pmd_page_vaddr
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+void poking_init(void);
+
+extern unsigned long ioremap_bot;
+extern const pgprot_t protection_map[16];
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address. Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test. What should we do here?
+ */
+#define kern_addr_valid(addr) (1)
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_large(pmd) 0
+#endif
+
+/* can we use this in kvm */
+unsigned long vmalloc_to_phys(void *vmalloc_addr);
+
+void pgtable_cache_add(unsigned int shift);
+
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
+/*
+ * When used, PTE_FRAG_NR is defined in subarch pgtable.h
+ * so we are sure it is included when arriving here.
+ */
+#ifdef PTE_FRAG_NR
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+ return ctx->pte_frag;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+ ctx->pte_frag = p;
+}
+#else
+#define PTE_FRAG_NR 1
+#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+ return NULL;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+}
+#endif
+
+#ifndef pmd_is_leaf
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+ return false;
+}
+#endif
+
+#ifndef pud_is_leaf
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+ return false;
+}
+#endif
+
+#ifndef p4d_is_leaf
+#define p4d_is_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
+{
+ return false;
+}
+#endif
+
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
+#ifdef CONFIG_PPC64
+#define is_ioremap_addr is_ioremap_addr
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= IOREMAP_BASE && addr < IOREMAP_END;
+}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
+#endif /* CONFIG_PPC64 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
new file mode 100644
index 000000000..59a2c7dbc
--- /dev/null
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PowerPC Memory Protection Keys management
+ *
+ * Copyright 2017, Ram Pai, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_KEYS_H
+#define _ASM_POWERPC_KEYS_H
+
+#include <linux/jump_label.h>
+#include <asm/firmware.h>
+
+extern int num_pkey;
+extern u32 reserved_allocation_mask; /* bits set for reserved keys */
+
+#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
+ VM_PKEY_BIT3 | VM_PKEY_BIT4)
+
+/* Override any generic PKEY permission defines */
+#define PKEY_DISABLE_EXECUTE 0x4
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | \
+ PKEY_DISABLE_WRITE | \
+ PKEY_DISABLE_EXECUTE)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/pkeys.h>
+#else
+#error "Not supported"
+#endif
+
+
+static inline u64 pkey_to_vmflag_bits(u16 pkey)
+{
+ return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
+}
+
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return 0;
+ return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
+}
+
+static inline int arch_max_pkey(void)
+{
+ return num_pkey;
+}
+
+#define pkey_alloc_mask(pkey) (0x1 << pkey)
+
+#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)
+
+#define __mm_pkey_allocated(mm, pkey) { \
+ mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
+}
+
+#define __mm_pkey_free(mm, pkey) { \
+ mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey); \
+}
+
+#define __mm_pkey_is_allocated(mm, pkey) \
+ (mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
+
+#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
+ pkey_alloc_mask(pkey))
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+ if (pkey < 0 || pkey >= arch_max_pkey())
+ return false;
+
+ /* Reserved keys are never allocated. */
+ if (__mm_pkey_is_reserved(pkey))
+ return false;
+
+ return __mm_pkey_is_allocated(mm, pkey);
+}
+
+/*
+ * Returns a positive, 5-bit key on success, or -1 on failure.
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
+ * mm_pkey_free().
+ */
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+ /*
+ * Note: this is the one and only place we make sure that the pkey is
+ * valid as far as the hardware is concerned. The rest of the kernel
+ * trusts that only good, valid pkeys come out of here.
+ */
+ u32 all_pkeys_mask = (u32)(~(0x0));
+ int ret;
+
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return -1;
+ /*
+ * Are we out of pkeys? We must handle this specially because ffz()
+ * behavior is undefined if there are no zeros.
+ */
+ if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+ return -1;
+
+ ret = ffz((u32)mm_pkey_allocation_map(mm));
+ __mm_pkey_allocated(mm, ret);
+
+ return ret;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return -1;
+
+ if (!mm_pkey_is_allocated(mm, pkey))
+ return -EINVAL;
+
+ __mm_pkey_free(mm, pkey);
+
+ return 0;
+}
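+
+/*
+ * Illustrative usage sketch (hypothetical caller, error handling
+ * elided): allocate a key under mmap_lock, deny writes through it,
+ * then release it.
+ *
+ *	int pkey = mm_pkey_alloc(mm);
+ *
+ *	if (pkey >= 0) {
+ *		arch_set_user_pkey_access(current, pkey,
+ *					  PKEY_DISABLE_WRITE);
+ *		...
+ *		mm_pkey_free(mm, pkey);
+ *	}
+ */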
+
+/*
+ * Try to dedicate one of the protection keys to be used as an
+ * execute-only protection key.
+ */
+extern int execute_only_pkey(struct mm_struct *mm);
+extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
+ int prot, int pkey);
+static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
+ int prot, int pkey)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return 0;
+
+ /*
+ * Is this an mprotect_pkey() call? If so, never override the value that
+ * came from the user.
+ */
+ if (pkey != -1)
+ return pkey;
+
+ return __arch_override_mprotect_pkey(vma, prot, pkey);
+}
+
+extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ unsigned long init_val);
+static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ unsigned long init_val)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return -EINVAL;
+
+ /*
+ * userspace should not change pkey-0 permissions.
+ * pkey-0 is associated with every page in the kernel.
+ * If userspace denies any permission on pkey-0, the
+ * kernel cannot operate.
+ */
+ if (pkey == 0)
+ return init_val ? -EINVAL : 0;
+
+ return __arch_set_user_pkey_access(tsk, pkey, init_val);
+}
+
+static inline bool arch_pkeys_enabled(void)
+{
+ return mmu_has_feature(MMU_FTR_PKEY);
+}
+
+extern void pkey_mm_init(struct mm_struct *mm);
+#endif /*_ASM_POWERPC_KEYS_H */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
new file mode 100644
index 000000000..fe3d0ea00
--- /dev/null
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
+#define _ASM_POWERPC_PLPAR_WRAPPERS_H
+
+#ifdef CONFIG_PPC_PSERIES
+
+#include <linux/string.h>
+#include <linux/irqflags.h>
+
+#include <asm/hvcall.h>
+#include <asm/paca.h>
+#include <asm/lppaca.h>
+#include <asm/page.h>
+
+static inline long poll_pending(void)
+{
+ return plpar_hcall_norets(H_POLL_PENDING);
+}
+
+static inline u8 get_cede_latency_hint(void)
+{
+ return get_lppaca()->cede_latency_hint;
+}
+
+static inline void set_cede_latency_hint(u8 latency_hint)
+{
+ get_lppaca()->cede_latency_hint = latency_hint;
+}
+
+static inline long cede_processor(void)
+{
+ /*
+ * We cannot call tracepoints inside RCU idle regions, which
+ * means we must not trace H_CEDE.
+ */
+ return plpar_hcall_norets_notrace(H_CEDE);
+}
+
+static inline long extended_cede_processor(unsigned long latency_hint)
+{
+ long rc;
+ u8 old_latency_hint = get_cede_latency_hint();
+
+ set_cede_latency_hint(latency_hint);
+
+ rc = cede_processor();
+
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+
+ set_cede_latency_hint(old_latency_hint);
+
+ return rc;
+}
+
+static inline long vpa_call(unsigned long flags, unsigned long cpu,
+ unsigned long vpa)
+{
+ flags = flags << H_VPA_FUNC_SHIFT;
+
+ return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
+}
+
+static inline long unregister_vpa(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
+}
+
+static inline long register_vpa(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_VPA, cpu, vpa);
+}
+
+static inline long unregister_slb_shadow(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
+}
+
+static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_SLB, cpu, vpa);
+}
+
+static inline long unregister_dtl(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
+}
+
+static inline long register_dtl(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_DTL, cpu, vpa);
+}
+
+extern void vpa_init(int cpu);
+
+static inline long plpar_pte_enter(unsigned long flags,
+ unsigned long hpte_group, unsigned long hpte_v,
+ unsigned long hpte_r, unsigned long *slot)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
+
+ *slot = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/*
+ * ptes must point to a buffer of 8 * sizeof(unsigned long) bytes
+ */
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
+
+ memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+ return rc;
+}
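+
+/*
+ * Illustrative usage sketch (hypothetical caller): H_READ with
+ * H_READ_4 returns four HPTEs, i.e. four (hpte_v, hpte_r) dword
+ * pairs, hence the 8-dword buffer.
+ *
+ *	unsigned long hptes[8];
+ *	long rc = plpar_pte_read_4(0, ptex, hptes);
+ */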
+
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * ptes must point to a buffer of 8 * sizeof(unsigned long) bytes
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+ memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+ return rc;
+}
+
+static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
+ unsigned long avpn)
+{
+ return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
+}
+
+static inline long plpar_resize_hpt_prepare(unsigned long flags,
+ unsigned long shift)
+{
+ return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
+}
+
+static inline long plpar_resize_hpt_commit(unsigned long flags,
+ unsigned long shift)
+{
+ return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
+}
+
+static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
+ unsigned long *tce_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
+
+ *tce_ret = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval)
+{
+ return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
+}
+
+static inline long plpar_tce_put_indirect(unsigned long liobn,
+ unsigned long ioba, unsigned long page, unsigned long count)
+{
+ return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
+}
+
+static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval, unsigned long count)
+{
+ return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
+}
+
+/* Set various resource mode parameters */
+static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
+ unsigned long value1, unsigned long value2)
+{
+ return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
+}
+
+/*
+ * Enable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_reloc_on_exceptions(void)
+{
+ /* mflags = 3: Exceptions at 0xC000000000004000 */
+ return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
+}
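+
+/*
+ * Illustrative retry sketch (hypothetical caller; the real loop lives
+ * in the pseries platform code). H_IS_LONG_BUSY() comes from
+ * asm/hvcall.h and mdelay() from linux/delay.h; get_longbusy_msecs()
+ * is assumed available from the hcall support code.
+ *
+ *	long rc;
+ *
+ *	do {
+ *		rc = enable_reloc_on_exceptions();
+ *		if (H_IS_LONG_BUSY(rc))
+ *			mdelay(get_longbusy_msecs(rc));
+ *	} while (H_IS_LONG_BUSY(rc));
+ */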
+
+/*
+ * Disable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long disable_reloc_on_exceptions(void)
+{
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
+}
+
+/*
+ * Take exceptions in big endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_big_endian_exceptions(void)
+{
+ /* mflags = 0: big endian exceptions */
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
+}
+
+/*
+ * Take exceptions in little endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_little_endian_exceptions(void)
+{
+ /* mflags = 1: little endian exceptions */
+ return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
+}
+
+static inline long plpar_set_ciabr(unsigned long ciabr)
+{
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
+}
+
+static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
+{
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
+}
+
+static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
+{
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
+}
+
+static inline long plpar_signal_sys_reset(long cpu)
+{
+ return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+}
+
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+ if (rc == H_SUCCESS) {
+ p->character = retbuf[0];
+ p->behaviour = retbuf[1];
+ }
+
+ return rc;
+}
+
+/*
+ * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
+ *
+ * - Returns H_SUCCESS on success
+ * - For H_BUSY return value, we retry the hcall.
+ * - For any other hcall failures, attempt a full flush once before
+ * resorting to BUG().
+ *
+ * Note: This hcall is expected to fail only very rarely. The correct
+ * error recovery of killing the process/guest will eventually be
+ * needed.
+ */
+static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+ u64 page_sizes, u64 start, u64 end)
+{
+ long rc;
+ unsigned long all;
+
+ while (true) {
+ rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
+ page_sizes, start, end);
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ } else if (rc == H_SUCCESS)
+ return rc;
+
+ /* Flush request failed, try with a full flush once */
+ if (type & H_RPTI_TYPE_NESTED)
+ all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
+ else
+ all = H_RPTI_TYPE_ALL;
+retry:
+ rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
+ all, page_sizes, 0, -1UL);
+ if (rc == H_BUSY) {
+ cpu_relax();
+ goto retry;
+ } else if (rc == H_SUCCESS)
+ return rc;
+
+ BUG();
+ }
+}
+
+#else /* !CONFIG_PPC_PSERIES */
+
+static inline long plpar_set_ciabr(unsigned long ciabr)
+{
+ return 0;
+}
+
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+{
+ return 0;
+}
+
+static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+ u64 page_sizes, u64 start, u64 end)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
new file mode 100644
index 000000000..2495866f2
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -0,0 +1,417 @@
+/*
+ * Definition of platform feature hooks for PowerMacs
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Paul Mackerras &
+ * Ben. Herrenschmidt.
+ *
+ *
+ * Note: I removed media-bay details from the feature stuff; I believe it's
+ * not worth it, since the media-bay driver can directly use the mac-io
+ * ASIC registers.
+ *
+ * Implementation note: Currently, none of these functions will block.
+ * However, they may internally protect themselves with a spinlock
+ * for way too long. Be prepared for at least some of these to block
+ * in the future.
+ *
+ * Unless specifically defined, the result code is assumed to be an
+ * error when negative; 0 is the default success result. Some functions
+ * may return additional positive result values.
+ *
+ * To keep implementation simple, all feature calls are assumed to have
+ * the prototype parameters (struct device_node* node, int value).
+ * When either is not used, pass 0.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_PMAC_FEATURE_H
+#define __ASM_POWERPC_PMAC_FEATURE_H
+
+#include <asm/macio.h>
+#include <asm/machdep.h>
+
+/*
+ * Known Mac motherboard models
+ *
+ * Please, report any error here to benh@kernel.crashing.org, thanks !
+ *
+ * Note that I don't fully maintain this list for Core99 & MacRISC2
+ * and I'm considering removing all NewWorld entries from it and
+ * entirely rely on the model string.
+ */
+
+/* PowerSurge are the first generation of PCI Pmacs. This includes
+ * all of the Grand-Central based machines. We currently don't
+ * differentiate most of them.
+ */
+#define PMAC_TYPE_PSURGE 0x10 /* PowerSurge */
+#define PMAC_TYPE_ANS 0x11 /* Apple Network Server */
+
+/* Here is the infamous series of OHare-based machines
+ */
+#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */
+#define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */
+#define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */
+#define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */
+#define PMAC_TYPE_UNKNOWN_OHARE 0x2f /* Unknown, but OHare based */
+
+/* Here are the Heathrow based machines
+ * FIXME: Differentiate wallstreet, mainstreet, wallstreetII
+ */
+#define PMAC_TYPE_GOSSAMER 0x30 /* Gossamer motherboard */
+#define PMAC_TYPE_SILK 0x31 /* Desktop PowerMac G3 */
+#define PMAC_TYPE_WALLSTREET 0x32 /* Wallstreet/Mainstreet PowerBook*/
+#define PMAC_TYPE_UNKNOWN_HEATHROW 0x3f /* Unknown but heathrow based */
+
+/* Here are newworld machines based on Paddington (heathrow derivative)
+ */
+#define PMAC_TYPE_101_PBOOK 0x40 /* 101 PowerBook (aka Lombard) */
+#define PMAC_TYPE_ORIG_IMAC 0x41 /* First generation iMac */
+#define PMAC_TYPE_YOSEMITE 0x42 /* B&W G3 */
+#define PMAC_TYPE_YIKES 0x43 /* Yikes G4 (PCI graphics) */
+#define PMAC_TYPE_UNKNOWN_PADDINGTON 0x4f /* Unknown but paddington based */
+
+/* Core99 machines based on UniNorth 1.0 and 1.5
+ *
+ * Note: A single entry here may cover several actual models according
+ * to the device-tree. (Sawtooth is most tower G4s, FW_IMAC is most
+ * FireWire based iMacs, etc...). Those machines are too similar to be
+ * distinguished here; when they need to be differentiated, use the
+ * device-tree "model" or "compatible" property.
+ */
+#define PMAC_TYPE_ORIG_IBOOK 0x40 /* First iBook model (no firewire) */
+#define PMAC_TYPE_SAWTOOTH 0x41 /* Desktop G4s */
+#define PMAC_TYPE_FW_IMAC 0x42 /* FireWire iMacs (except Pangea based) */
+#define PMAC_TYPE_FW_IBOOK 0x43 /* FireWire iBooks (except iBook2) */
+#define PMAC_TYPE_CUBE 0x44 /* Cube PowerMac */
+#define PMAC_TYPE_QUICKSILVER 0x45 /* QuickSilver G4s */
+#define PMAC_TYPE_PISMO 0x46 /* Pismo PowerBook */
+#define PMAC_TYPE_TITANIUM 0x47 /* Titanium PowerBook */
+#define PMAC_TYPE_TITANIUM2 0x48 /* Titanium II PowerBook (no L3, M6) */
+#define PMAC_TYPE_TITANIUM3 0x49 /* Titanium III PowerBook (with L3 & M7) */
+#define PMAC_TYPE_TITANIUM4 0x50 /* Titanium IV PowerBook (with L3 & M9) */
+#define PMAC_TYPE_EMAC 0x50 /* eMac */
+#define PMAC_TYPE_UNKNOWN_CORE99 0x5f
+
+/* MacRisc2 with UniNorth 2.0 */
+#define PMAC_TYPE_RACKMAC 0x80 /* XServe */
+#define PMAC_TYPE_WINDTUNNEL 0x81
+
+/* MacRISC2 machines based on the Pangea chipset
+ */
+#define PMAC_TYPE_PANGEA_IMAC 0x100 /* Flower Power iMac */
+#define PMAC_TYPE_IBOOK2 0x101 /* iBook2 (polycarbonate) */
+#define PMAC_TYPE_FLAT_PANEL_IMAC 0x102 /* Flat panel iMac */
+#define PMAC_TYPE_UNKNOWN_PANGEA 0x10f
+
+/* MacRISC2 machines based on the Intrepid chipset
+ */
+#define PMAC_TYPE_UNKNOWN_INTREPID 0x11f /* Generic */
+
+/* MacRISC4 / G5 machines. We don't have per-machine selection here anymore,
+ * but rather machine families
+ */
+#define PMAC_TYPE_POWERMAC_G5 0x150 /* U3 & U3H based */
+#define PMAC_TYPE_POWERMAC_G5_U3L 0x151 /* U3L based desktop */
+#define PMAC_TYPE_IMAC_G5 0x152 /* iMac G5 */
+#define PMAC_TYPE_XSERVE_G5 0x153 /* Xserve G5 */
+#define PMAC_TYPE_UNKNOWN_K2 0x19f /* Any other K2 based */
+#define PMAC_TYPE_UNKNOWN_SHASTA 0x19e /* Any other Shasta based */
+
+/*
+ * Motherboard flags
+ */
+
+#define PMAC_MB_CAN_SLEEP 0x00000001
+#define PMAC_MB_HAS_FW_POWER 0x00000002
+#define PMAC_MB_OLD_CORE99 0x00000004
+#define PMAC_MB_MOBILE 0x00000008
+#define PMAC_MB_MAY_SLEEP 0x00000010
+
+/*
+ * Feature calls supported on pmac
+ *
+ */
+
+/*
+ * Use this inline wrapper
+ */
+struct device_node;
+
+static inline long pmac_call_feature(int selector, struct device_node* node,
+ long param, long value)
+{
+ if (!ppc_md.feature_call || !machine_is(powermac))
+ return -ENODEV;
+ return ppc_md.feature_call(selector, node, param, value);
+}
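+
+/*
+ * Illustrative usage sketch (hypothetical device node "np" and bus id
+ * "busid"): power up an IDE cell per the PMAC_FTR_IDE_ENABLE
+ * convention documented below.
+ *
+ *	rc = pmac_call_feature(PMAC_FTR_IDE_ENABLE, np, busid, 1);
+ */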
+
+/* PMAC_FTR_SERIAL_ENABLE (struct device_node* node, int param, int value)
+ * enable/disable an SCC side. Pass the node corresponding to the
+ * channel side as a parameter.
+ * param is the type of port
+ * if param is ORed with PMAC_SCC_FLAG_XMON, the SCC is locked in the
+ * enabled state for use by xmon.
+ */
+#define PMAC_FTR_SCC_ENABLE PMAC_FTR_DEF(0)
+ #define PMAC_SCC_ASYNC 0
+ #define PMAC_SCC_IRDA 1
+ #define PMAC_SCC_I2S1 2
+ #define PMAC_SCC_FLAG_XMON 0x00001000
+
+/* PMAC_FTR_MODEM_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the internal modem.
+ */
+#define PMAC_FTR_MODEM_ENABLE PMAC_FTR_DEF(1)
+
+/* PMAC_FTR_SWIM3_ENABLE (struct device_node* node, 0,int value)
+ * enable/disable the swim3 (floppy) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_SWIM3_ENABLE PMAC_FTR_DEF(2)
+
+/* PMAC_FTR_MESH_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the mesh (scsi) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_MESH_ENABLE PMAC_FTR_DEF(3)
+
+/* PMAC_FTR_IDE_ENABLE (struct device_node* node, int busID, int value)
+ * enable/disable an IDE port of a mac-io ASIC
+ * pass the busID parameter
+ */
+#define PMAC_FTR_IDE_ENABLE PMAC_FTR_DEF(4)
+
+/* PMAC_FTR_IDE_RESET (struct device_node* node, int busID, int value)
+ * assert(1)/release(0) an IDE reset line (mac-io IDE only)
+ */
+#define PMAC_FTR_IDE_RESET PMAC_FTR_DEF(5)
+
+/* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
+ * it's reset line
+ */
+#define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)
+
+/* PMAC_FTR_GMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the gmac (ethernet) cell of an uninorth ASIC. This
+ * controls the cell's clock.
+ */
+#define PMAC_FTR_GMAC_ENABLE PMAC_FTR_DEF(7)
+
+/* PMAC_FTR_GMAC_PHY_RESET (struct device_node* node, 0, 0)
+ * Perform a HW reset of the PHY connected to a gmac controller.
+ * Pass the gmac device node, not the PHY node.
+ */
+#define PMAC_FTR_GMAC_PHY_RESET PMAC_FTR_DEF(8)
+
+/* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the sound chip, whatever it is and provided it can
+ * actually be controlled
+ */
+#define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
+
+/* -- add various tweaks related to sound routing -- */
+
+/* PMAC_FTR_AIRPORT_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the airport card
+ */
+#define PMAC_FTR_AIRPORT_ENABLE PMAC_FTR_DEF(10)
+
+/* PMAC_FTR_RESET_CPU (NULL, int cpu_nr, 0)
+ * toggle the reset line of a CPU on an uninorth-based SMP machine
+ */
+#define PMAC_FTR_RESET_CPU PMAC_FTR_DEF(11)
+
+/* PMAC_FTR_USB_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable a USB cell, along with the power of the USB "pad"
+ * on keylargo based machines
+ */
+#define PMAC_FTR_USB_ENABLE PMAC_FTR_DEF(12)
+
+/* PMAC_FTR_1394_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the firewire cell of an uninorth ASIC.
+ */
+#define PMAC_FTR_1394_ENABLE PMAC_FTR_DEF(13)
+
+/* PMAC_FTR_1394_CABLE_POWER (struct device_node* node, 0, int value)
+ * enable/disable the firewire cable power supply of the uninorth
+ * firewire cell
+ */
+#define PMAC_FTR_1394_CABLE_POWER PMAC_FTR_DEF(14)
+
+/* PMAC_FTR_SLEEP_STATE (struct device_node* node, 0, int value)
+ * set the sleep state of the motherboard.
+ *
+ * Pass -1 as value to query for sleep capability
+ * Pass 1 to set IOs to sleep
+ * Pass 0 to set IOs to wake
+ */
+#define PMAC_FTR_SLEEP_STATE PMAC_FTR_DEF(15)
+
+/* PMAC_FTR_GET_MB_INFO (NULL, selector, 0)
+ *
+ * returns some motherboard info.
+ * selector: 0 - model id
+ * 1 - model flags (capabilities)
+ * 2 - model name (cast to const char *)
+ */
+#define PMAC_FTR_GET_MB_INFO PMAC_FTR_DEF(16)
+#define PMAC_MB_INFO_MODEL 0
+#define PMAC_MB_INFO_FLAGS 1
+#define PMAC_MB_INFO_NAME 2
+
+/* PMAC_FTR_READ_GPIO (NULL, int index, 0)
+ *
+ * read a GPIO from a mac-io controller of type KeyLargo or Pangea.
+ * the value returned is a byte (positive), or a negative error code
+ */
+#define PMAC_FTR_READ_GPIO PMAC_FTR_DEF(17)
+
+/* PMAC_FTR_WRITE_GPIO (NULL, int index, int value)
+ *
+ * write a GPIO of a mac-io controller of type KeyLargo or Pangea.
+ */
+#define PMAC_FTR_WRITE_GPIO PMAC_FTR_DEF(18)
+
+/* PMAC_FTR_ENABLE_MPIC
+ *
+ * Enable the MPIC cell
+ */
+#define PMAC_FTR_ENABLE_MPIC PMAC_FTR_DEF(19)
+
+/* PMAC_FTR_AACK_DELAY_ENABLE (NULL, int enable, 0)
+ *
+ * Enable/disable the AACK delay on the northbridge for systems using DFS
+ */
+#define PMAC_FTR_AACK_DELAY_ENABLE PMAC_FTR_DEF(20)
+
+/* PMAC_FTR_DEVICE_CAN_WAKE
+ *
+ * Used by video drivers to inform system that they can actually perform
+ * wakeup from sleep
+ */
+#define PMAC_FTR_DEVICE_CAN_WAKE PMAC_FTR_DEF(22)
+
+
+/* Don't use those directly, they are for the sake of pmac_setup.c */
+extern long pmac_do_feature_call(unsigned int selector, ...);
+extern void pmac_feature_init(void);
+
+/* Video suspend tweak */
+extern void pmac_set_early_video_resume(void (*proc)(void *data), void *data);
+extern void pmac_call_early_video_resume(void);
+
+#define PMAC_FTR_DEF(x) ((0x6660000) | (x))
+
+/* The AGP driver registers itself here */
+extern void pmac_register_agp_pm(struct pci_dev *bridge,
+ int (*suspend)(struct pci_dev *bridge),
+ int (*resume)(struct pci_dev *bridge));
+
+/* Those are meant to be used by video drivers to deal with AGP
+ * suspend resume properly
+ */
+extern void pmac_suspend_agp_for_card(struct pci_dev *dev);
+extern void pmac_resume_agp_for_card(struct pci_dev *dev);
+
+/*
+ * The part below is for use by macio_asic.c only; do not rely
+ * on the data structures or constants below in a normal driver
+ *
+ */
+
+#define MAX_MACIO_CHIPS 2
+
+enum {
+ macio_unknown = 0,
+ macio_grand_central,
+ macio_ohare,
+ macio_ohareII,
+ macio_heathrow,
+ macio_gatwick,
+ macio_paddington,
+ macio_keylargo,
+ macio_pangea,
+ macio_intrepid,
+ macio_keylargo2,
+ macio_shasta,
+};
+
+struct macio_chip
+{
+ struct device_node *of_node;
+ int type;
+ const char *name;
+ int rev;
+ volatile u32 __iomem *base;
+ unsigned long flags;
+
+ /* For use by macio_asic PCI driver */
+ struct macio_bus lbus;
+};
+
+extern struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+#define MACIO_FLAG_SCCA_ON 0x00000001
+#define MACIO_FLAG_SCCB_ON 0x00000002
+#define MACIO_FLAG_SCC_LOCKED 0x00000004
+#define MACIO_FLAG_AIRPORT_ON 0x00000010
+#define MACIO_FLAG_FW_SUPPORTED 0x00000020
+
+extern struct macio_chip* macio_find(struct device_node* child, int type);
+
+#define MACIO_FCR32(macio, r) ((macio)->base + ((r) >> 2))
+#define MACIO_FCR8(macio, r) (((volatile u8 __iomem *)((macio)->base)) + (r))
+
+#define MACIO_IN32(r) (in_le32(MACIO_FCR32(macio,r)))
+#define MACIO_OUT32(r,v) (out_le32(MACIO_FCR32(macio,r), (v)))
+#define MACIO_BIS(r,v) (MACIO_OUT32((r), MACIO_IN32(r) | (v)))
+#define MACIO_BIC(r,v) (MACIO_OUT32((r), MACIO_IN32(r) & ~(v)))
+#define MACIO_IN8(r) (in_8(MACIO_FCR8(macio,r)))
+#define MACIO_OUT8(r,v) (out_8(MACIO_FCR8(macio,r), (v)))
+
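+/*
+ * Illustrative usage sketch: the accessors above expect a local
+ * variable named "macio" to be in scope. The offset and bit below are
+ * stand-ins, not real FCR definitions.
+ *
+ *	struct macio_chip *macio = &macio_chips[0];
+ *
+ *	MACIO_BIS(EXAMPLE_FCR_OFFSET, EXAMPLE_ENABLE_BIT);
+ *	MACIO_BIC(EXAMPLE_FCR_OFFSET, EXAMPLE_ENABLE_BIT);
+ */
+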
+/*
+ * Those are exported by pmac feature for internal use by arch code
+ * only, like the platform function callbacks; do not use directly in drivers
+ */
+extern raw_spinlock_t feature_lock;
+extern struct device_node *uninorth_node;
+extern u32 __iomem *uninorth_base;
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r) (uninorth_base + ((r) >> 2))
+#define UN_IN(r) (in_be32(UN_REG(r)))
+#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
+
+/* Uninorth variant:
+ *
+ * 0 = not uninorth
+ * 1 = U1.x or U2.x
+ * 3 = U3
+ * 4 = U4
+ */
+extern int pmac_get_uninorth_variant(void);
+
+/*
+ * Power Macintoshes have either a CUDA, PMU or SMU controlling
+ * system reset, power, NVRAM and the RTC.
+ */
+typedef enum sys_ctrler_kind {
+ SYS_CTRLER_UNKNOWN = 0,
+ SYS_CTRLER_CUDA = 1,
+ SYS_CTRLER_PMU = 2,
+ SYS_CTRLER_SMU = 3,
+} sys_ctrler_t;
+extern sys_ctrler_t sys_ctrler;
+
+#endif /* __ASM_POWERPC_PMAC_FEATURE_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pmac_low_i2c.h b/arch/powerpc/include/asm/pmac_low_i2c.h
new file mode 100644
index 000000000..21bd7297c
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_low_i2c.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-ppc/pmac_low_i2c.h
+ *
+ * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+#ifndef __PMAC_LOW_I2C_H__
+#define __PMAC_LOW_I2C_H__
+#ifdef __KERNEL__
+
+/* i2c mode (based on the platform functions format) */
+enum {
+ pmac_i2c_mode_dumb = 1,
+ pmac_i2c_mode_std = 2,
+ pmac_i2c_mode_stdsub = 3,
+ pmac_i2c_mode_combined = 4,
+};
+
+/* RW bit in address */
+enum {
+ pmac_i2c_read = 0x01,
+ pmac_i2c_write = 0x00
+};
+
+/* i2c bus type */
+enum {
+ pmac_i2c_bus_keywest = 0,
+ pmac_i2c_bus_pmu = 1,
+ pmac_i2c_bus_smu = 2,
+};
+
+/* i2c bus features */
+enum {
+ /* can_largesub : supports >1 byte subaddresses (SMU only) */
+ pmac_i2c_can_largesub = 0x00000001u,
+
+ /* multibus : device node holds multiple busses; the bus number is
+ * encoded in bits 0xff00 of "reg" of a given device
+ */
+ pmac_i2c_multibus = 0x00000002u,
+};
+
+/* i2c busses in the system */
+struct pmac_i2c_bus;
+struct i2c_adapter;
+
+/* Init, called early during boot */
+extern int pmac_i2c_init(void);
+
+/* Look up an i2c bus for a device node. The node can be either the bus
+ * node itself or a device below it. In the case of a multibus, the bus
+ * node itself is the controller node; otherwise, it's a child of the
+ * controller node.
+ */
+extern struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node);
+
+/* Get the address for an i2c device. This strips the bus number if
+ * necessary. The 7-bit address is returned already shifted left by one
+ * bit so that the direction bit can be directly ORed in.
+ */
+extern u8 pmac_i2c_get_dev_addr(struct device_node *device);
+
+/* Get info about a bus */
+extern struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus);
+extern struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_type(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus);
+
+/* i2c layer adapter helpers */
+extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus);
+extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter);
+
+/* Match a device or bus with an i2c adapter structure, to be used by drivers
+ * to match device-tree nodes with i2c adapters during adapter discovery
+ * callbacks
+ */
+extern int pmac_i2c_match_adapter(struct device_node *dev,
+ struct i2c_adapter *adapter);
+
+
+/* (legacy) Locking functions exposed to i2c-keywest */
+extern int pmac_low_i2c_lock(struct device_node *np);
+extern int pmac_low_i2c_unlock(struct device_node *np);
+
+/* Access functions for platform code */
+extern int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled);
+extern void pmac_i2c_close(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode);
+extern int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len);
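+
+/*
+ * Illustrative usage sketch (hypothetical device node "np", error
+ * handling elided): read two bytes from subaddress 0x10 of a device
+ * in combined mode.
+ *
+ *	struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
+ *	u8 addrdir = pmac_i2c_get_dev_addr(np) | pmac_i2c_read;
+ *	u8 buf[2];
+ *
+ *	pmac_i2c_open(bus, 0);
+ *	pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
+ *	pmac_i2c_xfer(bus, addrdir, 1, 0x10, buf, 2);
+ *	pmac_i2c_close(bus);
+ */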
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_i2c_suspend(void);
+extern void pmac_pfunc_i2c_resume(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PMAC_LOW_I2C_H__ */
diff --git a/arch/powerpc/include/asm/pmac_pfunc.h b/arch/powerpc/include/asm/pmac_pfunc.h
new file mode 100644
index 000000000..cee4e9f5b
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_pfunc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PMAC_PFUNC_H__
+#define __PMAC_PFUNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/* Flags in command lists */
+#define PMF_FLAGS_ON_INIT 0x80000000u
+#define PMF_FLGAS_ON_TERM 0x40000000u
+#define PMF_FLAGS_ON_SLEEP 0x20000000u
+#define PMF_FLAGS_ON_WAKE 0x10000000u
+#define PMF_FLAGS_ON_DEMAND 0x08000000u
+#define PMF_FLAGS_INT_GEN 0x04000000u
+#define PMF_FLAGS_HIGH_SPEED 0x02000000u
+#define PMF_FLAGS_LOW_SPEED 0x01000000u
+#define PMF_FLAGS_SIDE_EFFECTS 0x00800000u
+
+/*
+ * Arguments to a platform function call.
+ *
+ * NOTE: By convention, pointer arguments point to a u32
+ */
+struct pmf_args {
+ union {
+ u32 v;
+ u32 *p;
+ } u[4];
+ unsigned int count;
+};
+
+/*
+ * A driver capable of interpreting commands provides a handlers
+ * structure filled with whatever handlers are implemented by this
+ * driver. Non-implemented handlers are left NULL.
+ *
+ * PMF_STD_ARGS are the same arguments that are passed to the parser
+ * and that gets passed back to the various handlers.
+ *
+ * Interpreting a given function always starts with a begin() call, which
+ * returns instance data to be passed to subsequent calls, and
+ * ends with an end() call. This allows the low-level driver to implement
+ * locking policy or per-function instance data.
+ *
+ * For interrupt capable functions, irq_enable() is called when a client
+ * registers, and irq_disable() is called when the last client unregisters.
+ * Note that irq_enable & irq_disable are called within a semaphore held
+ * by the core, thus you should not try to register yourself to some other
+ * pmf interrupt during those calls.
+ */
+
+#define PMF_STD_ARGS struct pmf_function *func, void *instdata, \
+ struct pmf_args *args
+
+struct pmf_function;
+
+struct pmf_handlers {
+ void * (*begin)(struct pmf_function *func, struct pmf_args *args);
+ void (*end)(struct pmf_function *func, void *instdata);
+
+ int (*irq_enable)(struct pmf_function *func);
+ int (*irq_disable)(struct pmf_function *func);
+
+ int (*write_gpio)(PMF_STD_ARGS, u8 value, u8 mask);
+ int (*read_gpio)(PMF_STD_ARGS, u8 mask, int rshift, u8 xor);
+
+ int (*write_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*read_reg32)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*read_reg16)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+ int (*read_reg8)(PMF_STD_ARGS, u32 offset);
+
+ int (*delay)(PMF_STD_ARGS, u32 duration);
+
+ int (*wait_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*wait_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*wait_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+
+ int (*read_i2c)(PMF_STD_ARGS, u32 len);
+ int (*write_i2c)(PMF_STD_ARGS, u32 len, const u8 *data);
+ int (*rmw_i2c)(PMF_STD_ARGS, u32 masklen, u32 valuelen, u32 totallen,
+ const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_cfg)(PMF_STD_ARGS, u32 offset, u32 len);
+ int (*write_cfg)(PMF_STD_ARGS, u32 offset, u32 len, const u8 *data);
+ int (*rmw_cfg)(PMF_STD_ARGS, u32 offset, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len);
+ int (*write_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len, const u8 *data);
+ int (*set_i2c_mode)(PMF_STD_ARGS, int mode);
+ int (*rmw_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata,
+ const u8 *valuedata);
+
+ int (*read_reg32_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg16_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg8_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+
+ int (*write_reg32_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg16_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg8_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+
+ int (*mask_and_compare)(PMF_STD_ARGS, u32 len, const u8 *maskdata,
+ const u8 *valuedata);
+
+ struct module *owner;
+};
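+
+/*
+ * Illustrative sketch only (not part of this header): a driver able to
+ * execute GPIO writes might fill in its handlers like this; all demo_*
+ * names are hypothetical.
+ *
+ *	static void *demo_begin(struct pmf_function *func,
+ *				struct pmf_args *args)
+ *	{
+ *		return demo_alloc_instance(func);	// per-call instance data
+ *	}
+ *
+ *	static void demo_end(struct pmf_function *func, void *instdata)
+ *	{
+ *		demo_free_instance(instdata);
+ *	}
+ *
+ *	static int demo_write_gpio(PMF_STD_ARGS, u8 value, u8 mask)
+ *	{
+ *		return demo_gpio_update(func->node, value, mask);
+ *	}
+ *
+ *	static struct pmf_handlers demo_handlers = {
+ *		.begin		= demo_begin,
+ *		.end		= demo_end,
+ *		.write_gpio	= demo_write_gpio,
+ *		.owner		= THIS_MODULE,
+ *	};
+ */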
+
+
+/*
+ * Drivers that expose platform functions register at init time; this
+ * causes the platform functions for that device node to be parsed in
+ * advance and associated with the device. The data structures are
+ * partially public so a driver can walk the list of platform functions
+ * and, if needed, inspect the flags.
+ */
+struct pmf_device;
+
+struct pmf_function {
+ /* All functions for a given driver are linked */
+ struct list_head link;
+
+ /* Function node & driver data */
+ struct device_node *node;
+ void *driver_data;
+
+ /* For internal use by core */
+ struct pmf_device *dev;
+
+	/* The name is the "xxx" in "platform-do-xxx"; this is how
+	 * platform functions are identified by this code. Some functions
+	 * only apply to a given target, in which case its phandle is
+	 * stored here (or 0 if the filter doesn't apply).
+	 */
+ const char *name;
+ u32 phandle;
+
+	/* The flags for that function. You can have several functions
+	 * with the same name and different flags.
+	 */
+ u32 flags;
+
+ /* The actual tokenized function blob */
+ const void *data;
+ unsigned int length;
+
+ /* Interrupt clients */
+ struct list_head irq_clients;
+
+ /* Refcounting */
+ struct kref ref;
+};
+
+/*
+ * For platform functions that are interrupts, one can register
+ * irq_client structures. You canNOT use the same structure twice
+ * as it contains a link member. Also, the callback is invoked with
+ * a spinlock held, so you must not call back into any of the pmf_*
+ * functions from within that callback. See the illustrative sketch
+ * after the structure below.
+ */
+struct pmf_irq_client {
+ void (*handler)(void *data);
+ void *data;
+ struct module *owner;
+ struct list_head link;
+ struct pmf_function *func;
+};
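+
+/*
+ * Illustrative sketch only: registering for a hypothetical "demo-event"
+ * platform function interrupt. Remember the structure cannot be reused
+ * while registered, and the handler must defer any pmf_* work.
+ *
+ *	static void demo_pmf_irq(void *data)
+ *	{
+ *		schedule_work(data);	// pmf_* calls are not allowed here
+ *	}
+ *
+ *	static struct pmf_irq_client demo_client = {
+ *		.handler	= demo_pmf_irq,
+ *		.data		= &demo_work,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	err = pmf_register_irq_client(np, "demo-event", &demo_client);
+ */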
+
+
+/*
+ * Register/Unregister a function-capable driver and its handlers
+ */
+extern int pmf_register_driver(struct device_node *np,
+ struct pmf_handlers *handlers,
+ void *driverdata);
+
+extern void pmf_unregister_driver(struct device_node *np);
+
+
+/*
+ * Register/Unregister interrupt clients
+ */
+extern int pmf_register_irq_client(struct device_node *np,
+ const char *name,
+ struct pmf_irq_client *client);
+
+extern void pmf_unregister_irq_client(struct pmf_irq_client *client);
+
+/*
+ * Called by the handlers when an irq happens
+ */
+extern void pmf_do_irq(struct pmf_function *func);
+
+
+/*
+ * Low level call to platform functions.
+ *
+ * The phandle can filter on the target object for functions that have
+ * multiple targets, and the flags allow you to restrict the call to a
+ * given combination of flags.
+ *
+ * The args array contains as many arguments as are required by the
+ * function; this depends on the function you are calling, and
+ * unfortunately Apple's mechanism provides no way to encode that, so you
+ * have to get it right at the call site. Some functions require no args,
+ * in which case you can pass NULL.
+ *
+ * You can also pass NULL as the name. This will match any function that
+ * has the appropriate combination of flags & phandle, and you can pass 0
+ * as the phandle to match any.
+ */
+extern int pmf_do_functions(struct device_node *np, const char *name,
+ u32 phandle, u32 flags, struct pmf_args *args);
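+
+/*
+ * Illustrative sketch only: run every ON_DEMAND "demo-power" function of a
+ * node with a single u32 argument. The name and argument layout are
+ * hypothetical; the required argument count depends on the function called.
+ *
+ *	struct pmf_args args = { .count = 1, .u[0].v = 1 };
+ *	int rc;
+ *
+ *	rc = pmf_do_functions(np, "demo-power", 0, PMF_FLAGS_ON_DEMAND, &args);
+ */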
+
+
+
+/*
+ * High level call to a platform function.
+ *
+ * This one looks for the platform-xxx property first, so you should call
+ * it on the actual target if any. It will fall back to platform-do-xxx
+ * if it can't find one. It also exclusively targets functions that have
+ * the "OnDemand" flag.
+ */
+
+extern int pmf_call_function(struct device_node *target, const char *name,
+ struct pmf_args *args);
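+
+/*
+ * Illustrative sketch only, e.g. firing a hypothetical "demo-reset"
+ * function on a target node that takes no arguments:
+ *
+ *	rc = pmf_call_function(target, "demo-reset", NULL);
+ */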
+
+
+/*
+ * For low latency interrupt usage, you can look up on-demand functions
+ * using the functions below.
+ */
+
+extern struct pmf_function *pmf_find_function(struct device_node *target,
+ const char *name);
+
+extern struct pmf_function *pmf_get_function(struct pmf_function *func);
+extern void pmf_put_function(struct pmf_function *func);
+
+extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args);
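+
+/*
+ * Illustrative sketch only: resolve the function once at probe time, then
+ * call it with low latency later. pmf_get_function()/pmf_put_function()
+ * manage the reference count; "demo-func" is a hypothetical name.
+ *
+ *	func = pmf_find_function(target, "demo-func");
+ *	...
+ *	rc = pmf_call_one(func, NULL);
+ *	...
+ *	pmf_put_function(func);
+ */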
+
+int pmac_pfunc_base_install(void);
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_base_suspend(void);
+extern void pmac_pfunc_base_resume(void);
+
+#endif /* __PMAC_PFUNC_H__ */
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
new file mode 100644
index 000000000..3c09109e7
--- /dev/null
+++ b/arch/powerpc/include/asm/pmc.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * pmc.h
+ * Copyright (C) 2004 David Gibson, IBM Corporation
+ */
+#ifndef _POWERPC_PMC_H
+#define _POWERPC_PMC_H
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+typedef void (*perf_irq_t)(struct pt_regs *);
+extern perf_irq_t perf_irq;
+
+int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
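+
+/*
+ * Illustrative sketch only: a subsystem that wants the performance monitor
+ * counters installs its exception handler with reserve_pmc_hardware() and
+ * gives them back with release_pmc_hardware(); demo_pmu_irq is hypothetical.
+ *
+ *	static void demo_pmu_irq(struct pt_regs *regs)
+ *	{
+ *		// handle the performance monitor exception
+ *	}
+ *
+ *	if (!reserve_pmc_hardware(demo_pmu_irq)) {
+ *		...
+ *		release_pmc_hardware();
+ *	}
+ */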
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/lppaca.h>
+#include <asm/firmware.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+#ifdef CONFIG_PPC_PSERIES
+ get_lppaca()->pmcregs_in_use = inuse;
+#endif
+ }
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ get_paca()->pmcregs_in_use = inuse;
+#endif
+#endif
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static inline int ppc_get_pmu_inuse(void)
+{
+ return get_paca()->pmcregs_in_use;
+}
+#endif
+
+extern void power4_enable_pmcs(void);
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMC_H */
diff --git a/arch/powerpc/include/asm/pmi.h b/arch/powerpc/include/asm/pmi.h
new file mode 100644
index 000000000..478f0a2fe
--- /dev/null
+++ b/arch/powerpc/include/asm/pmi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _POWERPC_PMI_H
+#define _POWERPC_PMI_H
+
+/*
+ * Definitions for talking with PMI device on PowerPC
+ *
+ * PMI (Platform Management Interrupt) is a way to communicate
+ * with the BMC (Baseboard Management Controller) via interrupts.
+ * Unlike IPMI it is bidirectional and has a low latency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define PMI_TYPE_FREQ_CHANGE 0x01
+#define PMI_TYPE_POWER_BUTTON 0x02
+#define PMI_READ_TYPE 0
+#define PMI_READ_DATA0 1
+#define PMI_READ_DATA1 2
+#define PMI_READ_DATA2 3
+#define PMI_WRITE_TYPE 4
+#define PMI_WRITE_DATA0 5
+#define PMI_WRITE_DATA1 6
+#define PMI_WRITE_DATA2 7
+
+#define PMI_ACK 0x80
+
+#define PMI_TIMEOUT 100
+
+typedef struct {
+ u8 type;
+ u8 data0;
+ u8 data1;
+ u8 data2;
+} pmi_message_t;
+
+struct pmi_handler {
+ struct list_head node;
+ u8 type;
+ void (*handle_pmi_message) (pmi_message_t);
+};
+
+int pmi_register_handler(struct pmi_handler *);
+void pmi_unregister_handler(struct pmi_handler *);
+
+int pmi_send_message(pmi_message_t);
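+
+/*
+ * Illustrative sketch only: receiving frequency-change messages from the
+ * BMC; demo_handle_freq is hypothetical.
+ *
+ *	static void demo_handle_freq(pmi_message_t msg)
+ *	{
+ *		// msg.data0..data2 carry the payload of a
+ *		// PMI_TYPE_FREQ_CHANGE message
+ *	}
+ *
+ *	static struct pmi_handler demo_handler = {
+ *		.type			= PMI_TYPE_FREQ_CHANGE,
+ *		.handle_pmi_message	= demo_handle_freq,
+ *	};
+ *
+ *	pmi_register_handler(&demo_handler);
+ */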
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMI_H */
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
new file mode 100644
index 000000000..9acd1fbf1
--- /dev/null
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright 2017 IBM Corp.
+#ifndef _ASM_PNV_OCXL_H
+#define _ASM_PNV_OCXL_H
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+
+#define PNV_OCXL_TL_MAX_TEMPLATE 63
+#define PNV_OCXL_TL_BITS_PER_RATE 4
+#define PNV_OCXL_TL_RATE_BUF_SIZE ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8)
+
+#define PNV_OCXL_ATSD_TIMEOUT 1
+
+/* TLB Management Instructions */
+#define PNV_OCXL_ATSD_LNCH 0x00
+/* Radix Invalidate */
+#define PNV_OCXL_ATSD_LNCH_R PPC_BIT(0)
+/* Radix Invalidation Control
+ * 0b00 Just invalidate TLB.
+ * 0b01 Invalidate just Page Walk Cache.
+ * 0b10 Invalidate TLB, Page Walk Cache, and any
+ * caching of Partition and Process Table Entries.
+ */
+#define PNV_OCXL_ATSD_LNCH_RIC PPC_BITMASK(1, 2)
+/* Number and Page Size of translations to be invalidated */
+#define PNV_OCXL_ATSD_LNCH_LP PPC_BITMASK(3, 10)
+/* Invalidation Criteria
+ * 0b00 Invalidate just the target VA.
+ * 0b01 Invalidate matching PID.
+ */
+#define PNV_OCXL_ATSD_LNCH_IS PPC_BITMASK(11, 12)
+/* 0b1: Process Scope, 0b0: Partition Scope */
+#define PNV_OCXL_ATSD_LNCH_PRS PPC_BIT(13)
+/* Invalidation Flag */
+#define PNV_OCXL_ATSD_LNCH_B PPC_BIT(14)
+/* Actual Page Size to be invalidated
+ * 000 4KB
+ * 101 64KB
+ * 001 2MB
+ * 010 1GB
+ */
+#define PNV_OCXL_ATSD_LNCH_AP PPC_BITMASK(15, 17)
+/* Defines the large page select
+ * L=0b0 for 4KB pages
+ * L=0b1 for large pages
+ */
+#define PNV_OCXL_ATSD_LNCH_L PPC_BIT(18)
+/* Process ID */
+#define PNV_OCXL_ATSD_LNCH_PID PPC_BITMASK(19, 38)
+/* NoFlush – Assumed to be 0b0 */
+#define PNV_OCXL_ATSD_LNCH_F PPC_BIT(39)
+#define PNV_OCXL_ATSD_LNCH_OCAPI_SLBI PPC_BIT(40)
+#define PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON PPC_BIT(41)
+#define PNV_OCXL_ATSD_AVA 0x08
+#define PNV_OCXL_ATSD_AVA_AVA PPC_BITMASK(0, 51)
+#define PNV_OCXL_ATSD_STAT 0x10
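+
+/*
+ * Illustrative sketch only: composing a launch-register value for a
+ * PID-scoped radix invalidate, using the field encodings documented above
+ * (the authoritative sequence lives in the powernv platform code).
+ *
+ *	u64 launch = PNV_OCXL_ATSD_LNCH_R;			// radix
+ *	launch |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_RIC, 2);	// TLB + PWC
+ *	launch |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 1);		// match PID
+ *	launch |= PNV_OCXL_ATSD_LNCH_PRS;			// process scope
+ *	launch |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_PID, pid);
+ *	out_be64(arva + PNV_OCXL_ATSD_LNCH, launch);
+ */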
+
+int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported);
+int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count);
+
+int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
+ char *rate_buf, int rate_buf_size);
+int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
+ uint64_t rate_buf_phys, int rate_buf_size);
+
+int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq);
+void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
+ void __iomem *tfc, void __iomem *pe_handle);
+int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
+ void __iomem **dar, void __iomem **tfc,
+ void __iomem **pe_handle);
+
+int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data);
+void pnv_ocxl_spa_release(void *platform_data);
+int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle);
+
+int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid,
+ uint64_t lpcr, void __iomem **arva);
+void pnv_ocxl_unmap_lpar(void __iomem *arva);
+void pnv_ocxl_tlb_invalidate(void __iomem *arva,
+ unsigned long pid,
+ unsigned long addr,
+ unsigned long page_size);
+#endif /* _ASM_PNV_OCXL_H */
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
new file mode 100644
index 000000000..8afc92860
--- /dev/null
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2014 IBM Corp.
+ */
+
+#ifndef _ASM_PNV_PCI_H
+#define _ASM_PNV_PCI_H
+
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <misc/cxl-base.h>
+#include <asm/opal-api.h>
+
+#define PCI_SLOT_ID_PREFIX (1UL << 63)
+#define PCI_SLOT_ID(phb_id, bdfn) \
+ (PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+#define PCI_PHB_SLOT_ID(phb_id) (phb_id)
+
+extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
+extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
+extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
+ struct opal_msg *msg);
+
+extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
+ int enable);
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ unsigned int virq);
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
+int pnv_cxl_get_irq_count(struct pci_dev *dev);
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
+int64_t pnv_opal_pci_msi_eoi(struct irq_data *d);
+bool is_pnv_opal_msi(struct irq_chip *chip);
+
+#ifdef CONFIG_CXL_BASE
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev);
+#endif
+
+struct pnv_php_slot {
+ struct hotplug_slot slot;
+ uint64_t id;
+ char *name;
+ int slot_no;
+ unsigned int flags;
+#define PNV_PHP_FLAG_BROKEN_PDC 0x1
+ struct kref kref;
+#define PNV_PHP_STATE_INITIALIZED 0
+#define PNV_PHP_STATE_REGISTERED 1
+#define PNV_PHP_STATE_POPULATED 2
+#define PNV_PHP_STATE_OFFLINE 3
+ int state;
+ int irq;
+ struct workqueue_struct *wq;
+ struct device_node *dn;
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ bool power_state_check;
+ u8 attention_state;
+ void *fdt;
+ void *dt;
+ struct of_changeset ocs;
+ struct pnv_php_slot *parent;
+ struct list_head children;
+ struct list_head link;
+};
+extern struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn);
+extern int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+ uint8_t state);
+
+#endif
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
new file mode 100644
index 000000000..e1a858718
--- /dev/null
+++ b/arch/powerpc/include/asm/powernv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2017 IBM Corp.
+ */
+
+#ifndef _ASM_POWERNV_H
+#define _ASM_POWERNV_H
+
+#ifdef CONFIG_PPC_POWERNV
+extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
+
+void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
+
+void pnv_tm_init(void);
+#else
+static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
+
+static inline void pnv_tm_init(void) { }
+#endif
+
+#endif /* _ASM_POWERNV_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
new file mode 100644
index 000000000..21e33e46f
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -0,0 +1,690 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * Provides masks and opcode images for use by code generation, emulation,
+ * and for instructions that older assemblers might not know about.
+ */
+#ifndef _ASM_POWERPC_PPC_OPCODE_H
+#define _ASM_POWERPC_PPC_OPCODE_H
+
+#include <asm/asm-const.h>
+
+#define __REG_R0 0
+#define __REG_R1 1
+#define __REG_R2 2
+#define __REG_R3 3
+#define __REG_R4 4
+#define __REG_R5 5
+#define __REG_R6 6
+#define __REG_R7 7
+#define __REG_R8 8
+#define __REG_R9 9
+#define __REG_R10 10
+#define __REG_R11 11
+#define __REG_R12 12
+#define __REG_R13 13
+#define __REG_R14 14
+#define __REG_R15 15
+#define __REG_R16 16
+#define __REG_R17 17
+#define __REG_R18 18
+#define __REG_R19 19
+#define __REG_R20 20
+#define __REG_R21 21
+#define __REG_R22 22
+#define __REG_R23 23
+#define __REG_R24 24
+#define __REG_R25 25
+#define __REG_R26 26
+#define __REG_R27 27
+#define __REG_R28 28
+#define __REG_R29 29
+#define __REG_R30 30
+#define __REG_R31 31
+
+#define __REGA0_0 0
+#define __REGA0_R1 1
+#define __REGA0_R2 2
+#define __REGA0_R3 3
+#define __REGA0_R4 4
+#define __REGA0_R5 5
+#define __REGA0_R6 6
+#define __REGA0_R7 7
+#define __REGA0_R8 8
+#define __REGA0_R9 9
+#define __REGA0_R10 10
+#define __REGA0_R11 11
+#define __REGA0_R12 12
+#define __REGA0_R13 13
+#define __REGA0_R14 14
+#define __REGA0_R15 15
+#define __REGA0_R16 16
+#define __REGA0_R17 17
+#define __REGA0_R18 18
+#define __REGA0_R19 19
+#define __REGA0_R20 20
+#define __REGA0_R21 21
+#define __REGA0_R22 22
+#define __REGA0_R23 23
+#define __REGA0_R24 24
+#define __REGA0_R25 25
+#define __REGA0_R26 26
+#define __REGA0_R27 27
+#define __REGA0_R28 28
+#define __REGA0_R29 29
+#define __REGA0_R30 30
+#define __REGA0_R31 31
+
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
+#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
+#define IMM_D0(i) (((uintptr_t)(i) >> 16) & 0x3ffff)
+#define IMM_D1(i) IMM_L(i)
+
+/*
+ * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
+ * (e.g. LD, ADDI). If the bottom 16 bits are negative, add one to the top
+ * half to cancel the sign extension (i.e. 0xffff + 1 = 0x(1)0000).
+ */
+#define IMM_H(i) ((uintptr_t)(i)>>16)
+#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
+ (((uintptr_t)(i) & 0x8000) >> 15))
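+
+/*
+ * Worked example: splitting 0x12348765 for an addis/addi pair. IMM_L()
+ * gives 0x8765, which sign-extends negative, so IMM_HA() yields
+ * 0x1234 + 1 = 0x1235; 0x12350000 + (s16)0x8765 = 0x12348765 as intended.
+ */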
+
+
+/* opcode and xopcode for instructions */
+#define OP_PREFIX 1
+#define OP_TRAP_64 2
+#define OP_TRAP 3
+#define OP_SC 17
+#define OP_19 19
+#define OP_31 31
+#define OP_LWZ 32
+#define OP_LWZU 33
+#define OP_LBZ 34
+#define OP_LBZU 35
+#define OP_STW 36
+#define OP_STWU 37
+#define OP_STB 38
+#define OP_STBU 39
+#define OP_LHZ 40
+#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
+#define OP_STH 44
+#define OP_STHU 45
+#define OP_LMW 46
+#define OP_STMW 47
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_LQ 56
+#define OP_LD 58
+#define OP_STD 62
+
+#define OP_19_XOP_RFID 18
+#define OP_19_XOP_RFMCI 38
+#define OP_19_XOP_RFDI 39
+#define OP_19_XOP_RFI 50
+#define OP_19_XOP_RFCI 51
+#define OP_19_XOP_RFSCV 82
+#define OP_19_XOP_HRFID 274
+#define OP_19_XOP_URFID 306
+#define OP_19_XOP_STOP 370
+#define OP_19_XOP_DOZE 402
+#define OP_19_XOP_NAP 434
+#define OP_19_XOP_SLEEP 466
+#define OP_19_XOP_RVWINKLE 498
+
+#define OP_31_XOP_TRAP 4
+#define OP_31_XOP_LDX 21
+#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_LDUX 53
+#define OP_31_XOP_DCBST 54
+#define OP_31_XOP_LWZUX 55
+#define OP_31_XOP_TRAP_64 68
+#define OP_31_XOP_DCBF 86
+#define OP_31_XOP_LBZX 87
+#define OP_31_XOP_STDX 149
+#define OP_31_XOP_STWX 151
+#define OP_31_XOP_STDUX 181
+#define OP_31_XOP_STWUX 183
+#define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
+#define OP_31_XOP_STBUX 247
+#define OP_31_XOP_LHZX 279
+#define OP_31_XOP_LHZUX 311
+#define OP_31_XOP_MSGSNDP 142
+#define OP_31_XOP_MSGCLRP 174
+#define OP_31_XOP_MTMSR 146
+#define OP_31_XOP_MTMSRD 178
+#define OP_31_XOP_TLBIE 306
+#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LWAX 341
+#define OP_31_XOP_LHAX 343
+#define OP_31_XOP_LWAUX 373
+#define OP_31_XOP_LHAUX 375
+#define OP_31_XOP_STHX 407
+#define OP_31_XOP_STHUX 439
+#define OP_31_XOP_MTSPR 467
+#define OP_31_XOP_DCBI 470
+#define OP_31_XOP_LDBRX 532
+#define OP_31_XOP_LWBRX 534
+#define OP_31_XOP_TLBSYNC 566
+#define OP_31_XOP_STDBRX 660
+#define OP_31_XOP_STWBRX 662
+#define OP_31_XOP_STFSX 663
+#define OP_31_XOP_STFSUX 695
+#define OP_31_XOP_STFDX 727
+#define OP_31_XOP_STFDUX 759
+#define OP_31_XOP_LHBRX 790
+#define OP_31_XOP_LFIWAX 855
+#define OP_31_XOP_LFIWZX 887
+#define OP_31_XOP_STHBRX 918
+#define OP_31_XOP_STFIWX 983
+
+/* VSX Scalar Load Instructions */
+#define OP_31_XOP_LXSDX 588
+#define OP_31_XOP_LXSSPX 524
+#define OP_31_XOP_LXSIWAX 76
+#define OP_31_XOP_LXSIWZX 12
+
+/* VSX Scalar Store Instructions */
+#define OP_31_XOP_STXSDX 716
+#define OP_31_XOP_STXSSPX 652
+#define OP_31_XOP_STXSIWX 140
+
+/* VSX Vector Load Instructions */
+#define OP_31_XOP_LXVD2X 844
+#define OP_31_XOP_LXVW4X 780
+
+/* VSX Vector Load and Splat Instruction */
+#define OP_31_XOP_LXVDSX 332
+
+/* VSX Vector Store Instructions */
+#define OP_31_XOP_STXVD2X 972
+#define OP_31_XOP_STXVW4X 908
+
+#define OP_31_XOP_LFSX 535
+#define OP_31_XOP_LFSUX 567
+#define OP_31_XOP_LFDX 599
+#define OP_31_XOP_LFDUX 631
+
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX 103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX 231
+
+/* sorted alphabetically */
+#define PPC_INST_BCCTR_FLUSH 0x4c400420
+#define PPC_INST_COPY 0x7c20060c
+#define PPC_INST_DCBA 0x7c0005ec
+#define PPC_INST_DCBA_MASK 0xfc0007fe
+#define PPC_INST_DSSALL 0x7e00066c
+#define PPC_INST_ISEL 0x7c00001e
+#define PPC_INST_ISEL_MASK 0xfc00003e
+#define PPC_INST_LSWI 0x7c0004aa
+#define PPC_INST_LSWX 0x7c00042a
+#define PPC_INST_LWSYNC 0x7c2004ac
+#define PPC_INST_SYNC 0x7c0004ac
+#define PPC_INST_SYNC_MASK 0xfc0007fe
+#define PPC_INST_MCRXR 0x7c000400
+#define PPC_INST_MCRXR_MASK 0xfc0007fe
+#define PPC_INST_MFSPR_PVR 0x7c1f42a6
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
+#define PPC_INST_MTMSRD 0x7c000164
+#define PPC_INST_PASTE 0x7c20070d
+#define PPC_INST_PASTE_MASK 0xfc2007ff
+#define PPC_INST_POPCNTB 0x7c0000f4
+#define PPC_INST_POPCNTB_MASK 0xfc0007fe
+#define PPC_INST_RFEBB 0x4c000124
+#define PPC_INST_RFID 0x4c000024
+#define PPC_INST_MFSPR_DSCR 0x7c1102a6
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
+#define PPC_INST_MTSPR_DSCR 0x7c1103a6
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
+#define PPC_INST_STRING 0x7c00042a
+#define PPC_INST_STRING_MASK 0xfc0007fe
+#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+#define PPC_INST_STSWI 0x7c0005aa
+#define PPC_INST_STSWX 0x7c00052a
+#define PPC_INST_TRECHKPT 0x7c0007dd
+#define PPC_INST_TRECLAIM 0x7c00075d
+#define PPC_INST_TSR 0x7c0005dd
+#define PPC_INST_BRANCH_COND 0x40800000
+
+/* Prefixes */
+#define PPC_INST_LFS 0xc0000000
+#define PPC_INST_STFS 0xd0000000
+#define PPC_INST_LFD 0xc8000000
+#define PPC_INST_STFD 0xd8000000
+#define PPC_PREFIX_MLS 0x06000000
+#define PPC_PREFIX_8LS 0x04000000
+
+/* Prefixed instructions */
+#define PPC_INST_PLD 0xe4000000
+#define PPC_INST_PSTD 0xf4000000
+
+/* macros to insert fields into opcodes */
+#define ___PPC_RA(a) (((a) & 0x1f) << 16)
+#define ___PPC_RB(b) (((b) & 0x1f) << 11)
+#define ___PPC_RC(c) (((c) & 0x1f) << 6)
+#define ___PPC_RS(s) (((s) & 0x1f) << 21)
+#define ___PPC_RT(t) ___PPC_RS(t)
+#define ___PPC_R(r) (((r) & 0x1) << 16)
+#define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
+#define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
+#define __PPC_RA(a) ___PPC_RA(__REG_##a)
+#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
+#define __PPC_RB(b) ___PPC_RB(__REG_##b)
+#define __PPC_RS(s) ___PPC_RS(__REG_##s)
+#define __PPC_RT(t) ___PPC_RT(__REG_##t)
+#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
+#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
+#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
+#define __PPC_XT(s) __PPC_XS(s)
+#define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21)
+#define __PPC_XTP(s) __PPC_XSP(s)
+#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
+#define __PPC_PL(p) (((p) & 0x3) << 16)
+#define __PPC_WC(w) (((w) & 0x3) << 21)
+#define __PPC_WS(w) (((w) & 0x1f) << 11)
+#define __PPC_SH(s) __PPC_WS(s)
+#define __PPC_SH64(s) (__PPC_SH(s) | (((s) & 0x20) >> 4))
+#define __PPC_MB(s) ___PPC_RC(s)
+#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_MB64(s) (__PPC_MB(s) | ((s) & 0x20))
+#define __PPC_ME64(s) __PPC_MB64(s)
+#define __PPC_BI(s) (((s) & 0x1f) << 16)
+#define __PPC_CT(t) (((t) & 0x0f) << 21)
+#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
+#define __PPC_RC21 (0x1 << 10)
+#define __PPC_PRFX_R(r) (((r) & 0x1) << 20)
+#define __PPC_EH(eh) (((eh) & 0x1) << 0)
+
+/*
+ * Both low and high 16 bits are added as SIGNED additions, so if the low
+ * 16 bits have their high bit set, the high 16 bits must be adjusted.
+ * These macros do that (stolen from binutils).
+ */
+#define PPC_LO(v) ((v) & 0xffff)
+#define PPC_HI(v) (((v) >> 16) & 0xffff)
+#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
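+
+/*
+ * Illustrative sketch: a full 64-bit constant is typically materialized as
+ *
+ *	lis  r, PPC_HIGHEST(v);  ori  r, r, PPC_HIGHER(v);
+ *	sldi r, r, 32;  oris r, r, PPC_HI(v);  ori r, r, PPC_LO(v)
+ *
+ * where only the PPC_HA()-based addis/addi sequences need the sign-carry
+ * adjustment described above.
+ */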
+
+/* LI Field */
+#define PPC_LI_MASK 0x03fffffc
+#define PPC_LI(v) ((v) & PPC_LI_MASK)
+
+/* Base instruction encoding */
+#define PPC_RAW_CP_ABORT (0x7c00068c)
+#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DARN(t, l) (0x7c0005e6 | ___PPC_RT(t) | (((l) & 0x3) << 16))
+#define PPC_RAW_DCBAL(a, b) (0x7c2005ec | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_DCBZL(a, b) (0x7c2007ec | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_LQARX(t, a, b, eh) (0x7c000228 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_LDARX(t, a, b, eh) (0x7c0000a8 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_LWARX(t, a, b, eh) (0x7c000028 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_PHWSYNC (0x7c8004ac)
+#define PPC_RAW_PLWSYNC (0x7ca004ac)
+#define PPC_RAW_STQCX(t, a, b) (0x7c00016d | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MADDHD(t, a, b, c) (0x10000030 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MADDHDU(t, a, b, c) (0x10000031 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MADDLD(t, a, b, c) (0x10000033 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MSGSND(b) (0x7c00019c | ___PPC_RB(b))
+#define PPC_RAW_MSGSYNC (0x7c0006ec)
+#define PPC_RAW_MSGCLR(b) (0x7c0001dc | ___PPC_RB(b))
+#define PPC_RAW_MSGSNDP(b) (0x7c00011c | ___PPC_RB(b))
+#define PPC_RAW_MSGCLRP(b) (0x7c00015c | ___PPC_RB(b))
+#define PPC_RAW_PASTE(a, b) (0x7c20070d | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_POPCNTB(a, s) (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_POPCNTD(a, s) (0x7c0003f4 | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_POPCNTW(a, s) (0x7c0002f4 | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_RFCI (0x4c000066)
+#define PPC_RAW_RFDI (0x4c00004e)
+#define PPC_RAW_RFMCI (0x4c00004c)
+#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_WAIT_v203 (0x7c00007c)
+#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p))
+#define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp))
+#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \
+ (0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
+#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
+ (0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
+#define PPC_RAW_TLBIEL_v205(rb, l)	(0x7c000224 | ___PPC_RB(rb) | ((l) << 21))
+#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_RAW_ERATRE(s, a, w) (0x7c000166 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_RAW_ERATILX(t, a, b) (0x7c000066 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATIVAX(s, a, b) (0x7c000666 | __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATSX(t, a, w)		(0x7c000126 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(w))
+#define PPC_RAW_ERATSX_DOT(t, a, w)	(0x7c000127 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(w))
+#define PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | __PPC_RT(t) | __PPC_RB(b))
+#define __PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | ___PPC_RT(t) | ___PPC_RB(b))
+#define PPC_RAW_ICBT(c, a, b) (0x7c00002c | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_LBZCIX(t, a, b) (0x7c0006aa | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
+#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
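+
+/*
+ * Illustrative sketch only: the PPC_RAW_* macros produce u32 instruction
+ * images, e.g. for runtime code patching or a JIT emitter; EMIT() is
+ * hypothetical.
+ *
+ *	#define EMIT(insn)	(image[idx++] = (insn))
+ *
+ *	EMIT(PPC_RAW_LI(_R4, 0));		// li   r4, 0
+ *	EMIT(PPC_RAW_ADDI(_R3, _R3, 1));	// addi r3, r3, 1
+ *	EMIT(PPC_RAW_BLR());			// blr
+ */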
+
+/*
+ * Define what the VSX XX1 form instructions will look like, then add
+ * the 128-bit load/store instructions based on that.
+ */
+#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
+#define PPC_RAW_STXVD2X(s, a, b) (0x7c000798 | VSX_XX1((s), a, b))
+#define PPC_RAW_LXVD2X(s, a, b) (0x7c000698 | VSX_XX1((s), a, b))
+#define PPC_RAW_MFVRD(a, t) (0x7c000066 | VSX_XX1((t) + 32, a, R0))
+#define PPC_RAW_MTVRD(t, a) (0x7c000166 | VSX_XX1((t) + 32, a, R0))
+#define PPC_RAW_VPMSUMW(t, a, b) (0x10000488 | VSX_XX3((t), a, b))
+#define PPC_RAW_VPMSUMD(t, a, b) (0x100004c8 | VSX_XX3((t), a, b))
+#define PPC_RAW_XXLOR(t, a, b) (0xf0000490 | VSX_XX3((t), a, b))
+#define PPC_RAW_XXSWAPD(t, a) (0xf0000250 | VSX_XX3((t), a, a))
+#define PPC_RAW_XVCPSGNDP(t, a, b) ((0xf0000780 | VSX_XX3((t), (a), (b))))
+#define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \
+ ((0x1000002d | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6)))
+#define PPC_RAW_LXVP(xtp, a, i) (0x18000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_DQ(i))
+#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
+#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_NAP (0x4c000364)
+#define PPC_RAW_SLEEP (0x4c0003a4)
+#define PPC_RAW_WINKLE (0x4c0003e4)
+#define PPC_RAW_STOP (0x4c0002e4)
+#define PPC_RAW_CLRBHRB (0x7c00035c)
+#define PPC_RAW_MFBHRBE(r, n) (0x7c00025c | __PPC_RT(r) | (((n) & 0x3ff) << 11))
+#define PPC_RAW_TRECHKPT (PPC_INST_TRECHKPT)
+#define PPC_RAW_TRECLAIM(r) (PPC_INST_TRECLAIM | __PPC_RA(r))
+#define PPC_RAW_TABORT(r) (0x7c00071d | __PPC_RA(r))
+#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
+#define PPC_RAW_MTTMR(tmr, r) (0x7c0003dc | TMRN(tmr) | ___PPC_RS(r))
+#define PPC_RAW_MFTMR(tmr, r) (0x7c0002dc | TMRN(tmr) | ___PPC_RT(r))
+#define PPC_RAW_ICSWX(s, a, b) (0x7c00032d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ICSWEPX(s, a, b) (0x7c00076d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SLBIA(IH) (0x7c0003e4 | (((IH) & 0x7) << 21))
+#define PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb) \
+ (0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
+#define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \
+ (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
+#define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LFDX(t, a, b) (0x7c0004ae | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADD(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADD_DOT(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
+#define PPC_RAW_BLRL() (0x4e800021)
+#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
+#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_RAW_STW(r, base, i) (0x90000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STWU(r, base, i) (0x94000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STH(r, base, i) (0xb0000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPDI(a, i) (0x2c200000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPW(a, b) (0x7c000000 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPD(a, b) (0x7c200000 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPLWI(a, i) (0x28000000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPLDI(a, i) (0x28200000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
+#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
+#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDE_DOT(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_DIVDEU(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
+#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRW(d, a, s) (0x7c000430 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAW(d, a, s) (0x7c000630 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAWI(d, a, i) (0x7c000670 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i))
+#define PPC_RAW_SRD(d, a, s) (0x7c000436 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAD(d, a, s) (0x7c000634 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRADI(d, a, i) (0x7c000674 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i))
+#define PPC_RAW_RLWINM(d, a, i, mb, me) (0x54000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLWINM_DOT(d, a, i, mb, me) \
+ (0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+
+/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
+#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
+/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
+#define PPC_RAW_SRWI(d, a, i) PPC_RAW_RLWINM(d, a, 32-(i), i, 31)
+/* sldi = rldicr Rx, Ry, n, 63-n */
+#define PPC_RAW_SLDI(d, a, i) PPC_RAW_RLDICR(d, a, i, 63-(i))
+/* srdi = rldicl Rx, Ry, 64-n, n */
+#define PPC_RAW_SRDI(d, a, i) PPC_RAW_RLDICL(d, a, 64-(i), i)
+
+#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
+
+#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
+
+#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
+#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
+#define PPC_RAW_TW(t0, a, b) (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_TRAP() PPC_RAW_TW(31, 0, 0)
+#define PPC_RAW_SETB(t, bfa) (0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2))
+
+/* Deal with instructions that older assemblers aren't aware of */
+#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
+#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
+#define PPC_COPY(a, b) stringify_in_c(.long PPC_RAW_COPY(a, b))
+#define PPC_DARN(t, l) stringify_in_c(.long PPC_RAW_DARN(t, l))
+#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_RAW_DCBAL(a, b))
+#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
+#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
+#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
+#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
+#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
+#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
+#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))
+#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHDU(t, a, b, c))
+#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDLD(t, a, b, c))
+#define PPC_MSGSND(b) stringify_in_c(.long PPC_RAW_MSGSND(b))
+#define PPC_MSGSYNC stringify_in_c(.long PPC_RAW_MSGSYNC)
+#define PPC_MSGCLR(b) stringify_in_c(.long PPC_RAW_MSGCLR(b))
+#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_RAW_MSGSNDP(b))
+#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_RAW_MSGCLRP(b))
+#define PPC_PASTE(a, b) stringify_in_c(.long PPC_RAW_PASTE(a, b))
+#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_RAW_POPCNTB(a, s))
+#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_RAW_POPCNTD(a, s))
+#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_RAW_POPCNTW(a, s))
+#define PPC_RFCI stringify_in_c(.long PPC_RAW_RFCI)
+#define PPC_RFDI stringify_in_c(.long PPC_RAW_RFDI)
+#define PPC_RFMCI stringify_in_c(.long PPC_RAW_RFMCI)
+#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b))
+#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
+#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
+#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
+#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203)
+#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p))
+#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a))
+#define PPC_TLBIE_5(rb, rs, ric, prs, r) \
+ stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
+#define PPC_TLBIEL(rb,rs,ric,prs,r) \
+ stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
+#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
+#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b))
+#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b))
+
+#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_RAW_ERATWE(s, a, w))
+#define PPC_ERATRE(s, a, w)	stringify_in_c(.long PPC_RAW_ERATRE(s, a, w))
+#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_RAW_ERATILX(t, a, b))
+#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_RAW_ERATIVAX(s, a, b))
+#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX(t, a, w))
+#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX_DOT(t, a, w))
+#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_RAW_SLBFEE_DOT(t, b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long __PPC_RAW_SLBFEE_DOT(t, b))
+#define PPC_ICBT(c, a, b) stringify_in_c(.long PPC_RAW_ICBT(c, a, b))
+/* PASemi instructions */
+#define LBZCIX(t, a, b) stringify_in_c(.long PPC_RAW_LBZCIX(t, a, b))
+#define STBCIX(s, a, b) stringify_in_c(.long PPC_RAW_STBCIX(s, a, b))
+#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_RAW_DCBFPS(a, b))
+#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_RAW_DCBSTPS(a, b))
+#define PPC_PHWSYNC stringify_in_c(.long PPC_RAW_PHWSYNC)
+#define PPC_PLWSYNC stringify_in_c(.long PPC_RAW_PLWSYNC)
+#define STXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_STXVD2X(s, a, b))
+#define LXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_LXVD2X(s, a, b))
+#define MFVRD(a, t) stringify_in_c(.long PPC_RAW_MFVRD(a, t))
+#define MTVRD(t, a) stringify_in_c(.long PPC_RAW_MTVRD(t, a))
+#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMW(t, a, b))
+#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMD(t, a, b))
+#define XXLOR(t, a, b) stringify_in_c(.long PPC_RAW_XXLOR(t, a, b))
+#define XXSWAPD(t, a) stringify_in_c(.long PPC_RAW_XXSWAPD(t, a))
+#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_RAW_XVCPSGNDP(t, a, b)))
+
+#define VPERMXOR(vrt, vra, vrb, vrc) \
+ stringify_in_c(.long (PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc)))
+
+#define PPC_NAP stringify_in_c(.long PPC_RAW_NAP)
+#define PPC_SLEEP stringify_in_c(.long PPC_RAW_SLEEP)
+#define PPC_WINKLE stringify_in_c(.long PPC_RAW_WINKLE)
+
+#define PPC_STOP stringify_in_c(.long PPC_RAW_STOP)
+
+/* BHRB instructions */
+#define PPC_CLRBHRB stringify_in_c(.long PPC_RAW_CLRBHRB)
+#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_RAW_MFBHRBE(r, n))
+
+/* Transactional memory instructions */
+#define TRECHKPT stringify_in_c(.long PPC_RAW_TRECHKPT)
+#define TRECLAIM(r) stringify_in_c(.long PPC_RAW_TRECLAIM(r))
+#define TABORT(r) stringify_in_c(.long PPC_RAW_TABORT(r))
+
+/* book3e thread control instructions */
+#define MTTMR(tmr, r) stringify_in_c(.long PPC_RAW_MTTMR(tmr, r))
+#define MFTMR(tmr, r) stringify_in_c(.long PPC_RAW_MFTMR(tmr, r))
+
+/* Coprocessor instructions */
+#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWX(s, a, b))
+#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWEPX(s, a, b))
+
+#define PPC_SLBIA(IH) stringify_in_c(.long PPC_RAW_SLBIA(IH))
+
+/*
+ * These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix
+ * implies CPU_FTR_ARCH_300). USER/GUEST invalidates may only be used in
+ * radix mode (on HPT these would also invalidate various SLBEs, which may
+ * not be desired).
+ */
+#define PPC_ISA_3_0_INVALIDATE_ERAT PPC_SLBIA(7)
+#define PPC_RADIX_INVALIDATE_ERAT_USER PPC_SLBIA(3)
+#define PPC_RADIX_INVALIDATE_ERAT_GUEST PPC_SLBIA(6)
+
+#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb))
+
+#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb))
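+
+/*
+ * Illustrative sketch only: the stringified forms above are meant for
+ * assembly or inline asm where the toolchain may not know the mnemonic,
+ * e.g.:
+ *
+ *	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : : "memory");
+ */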
+
+#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
new file mode 100644
index 000000000..f6cf01590
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) 2001 PPC 64 Team, IBM Corp
+ */
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+extern unsigned long isa_io_base;
+
+extern struct list_head hose_list;
+
+extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
+
+/* Bus Unit ID macros; get the low and high 32 bits of the 64-bit BUID */
+#define BUID_HI(buid) upper_32_bits(buid)
+#define BUID_LO(buid) lower_32_bits(buid)
+
+/* PCI device_node operations */
+struct device_node;
+struct pci_dn;
+
+void *pci_traverse_device_nodes(struct device_node *start,
+ void *(*fn)(struct device_node *, void *),
+ void *data);
+extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+
+/* From rtas_pci.h */
+extern void init_pci_config_tokens(void);
+extern unsigned long get_phb_buid(struct device_node *);
+extern int rtas_setup_phb(struct pci_controller *phb);
+
+#ifdef CONFIG_EEH
+
+void eeh_addr_cache_insert_dev(struct pci_dev *dev);
+void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
+struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
+void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
+int eeh_pci_enable(struct eeh_pe *pe, int function);
+int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed);
+void eeh_save_bars(struct eeh_dev *edev);
+int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
+void eeh_pe_state_mark(struct eeh_pe *pe, int state);
+void eeh_pe_mark_isolated(struct eeh_pe *pe);
+void eeh_pe_state_clear(struct eeh_pe *pe, int state, bool include_passed);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
+void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
+
+void eeh_sysfs_add_device(struct pci_dev *pdev);
+void eeh_sysfs_remove_device(struct pci_dev *pdev);
+
+#endif /* CONFIG_EEH */
+
+#define PCI_BUSNO(bdfn) (((bdfn) >> 8) & 0xff)
+
+#else /* CONFIG_PCI */
+static inline void init_pci_config_tokens(void) { }
+#endif /* !CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/arch/powerpc/include/asm/ppc4xx.h b/arch/powerpc/include/asm/ppc4xx.h
new file mode 100644
index 000000000..b37119e48
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PPC4xx Prototypes and definitions
+ *
+ * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ */
+
+#ifndef __ASM_POWERPC_PPC4xx_H__
+#define __ASM_POWERPC_PPC4xx_H__
+
+extern void __noreturn ppc4xx_reset_system(char *cmd);
+
+#endif /* __ASM_POWERPC_PPC4xx_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
new file mode 100644
index 000000000..753a2757b
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -0,0 +1,821 @@
+/*
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ */
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
+
+#include <linux/stringify.h>
+#include <asm/asm-compat.h>
+#include <asm/processor.h>
+#include <asm/ppc-opcode.h>
+#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
+#include <asm/extable.h>
+
+#ifdef __ASSEMBLY__
+
+#define SZL (BITS_PER_LONG/8)
+
+/*
+ * This expands to a sequence of operations with reg incrementing from
+ * start to end inclusive, of this form:
+ *
+ * op reg, (offset + (width * reg))(base)
+ *
+ * Note that offset is not the offset of the first operation unless start
+ * is zero (or width is zero).
+ */
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg, \offset + \width * .Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
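+
+/*
+ * For example, SAVE_GPRS(14, 15, r1) below expands (on 64-bit) to:
+ *
+ *	std 14, GPR0 + 8 * 14(r1)
+ *	std 15, GPR0 + 8 * 15(r1)
+ */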
+
+/*
+ * This expands to a sequence of register clears for regs start to end
+ * inclusive, of the form:
+ *
+ * li rN, 0
+ */
+.macro ZEROIZE_REGS start, end
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ li .Lreg, 0
+ .Lreg=.Lreg+1
+ .endr
+.endm
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#ifdef __powerpc64__
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(14, 31, base)
+#else
+#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
+#endif
+
+#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end
+#ifdef __powerpc64__
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31)
+#else
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31)
+#endif
+#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n)
+
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
+
+#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b
+#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
+#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b
+#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \
+ STXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+#endif
+/* Save the lower 32 VSRs in the thread VSR region */
+#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
+#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
+#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
+#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
+#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
+#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
+#define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
+#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
+#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
+#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
+#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
+
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
+
+/* Macros to adjust thread priority for hardware multithreading */
+#define HMT_VERY_LOW or 31,31,31 # very low priority
+#define HMT_LOW or 1,1,1
+#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority
+#define HMT_MEDIUM or 2,2,2
+#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
+#define HMT_HIGH or 3,3,3
+#define HMT_EXTRA_HIGH or 7,7,7 # power7 only
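+/*
+ * These are special no-op forms of "or rN,rN,rN": the register contents
+ * are unchanged, but the hardware interprets the particular register
+ * number as an SMT thread-priority hint (e.g. HMT_LOW assembles to
+ * or 1,1,1).
+ */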
+
+#ifdef CONFIG_PPC64
+#define ULONG_SIZE 8
+#else
+#define ULONG_SIZE 4
+#endif
+#define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
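+/*
+ * Illustrative expansion, assuming the usual __REG_Rn -> n mapping used
+ * by the upper-case register macros: VCPU_GPR(R4) becomes
+ * (VCPU_GPRS + (4 * ULONG_SIZE)), i.e. the byte offset of GPR4 within
+ * the vcpu GPR save area.
+ */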
+
+#ifdef __KERNEL__
+
+/*
+ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
+ * version below in the else case of the ifdef.
+ */
+#ifdef __powerpc64__
+
+#define STACKFRAMESIZE 256
+#define __STK_REG(i) (112 + ((i)-14)*8)
+#define STK_REG(i) __STK_REG(__REG_##i)
+
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+#define STK_GOT 24
+#define __STK_PARAM(i) (32 + ((i)-3)*8)
+#else
+#define STK_GOT 40
+#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#endif
+#define STK_PARAM(i) __STK_PARAM(__REG_##i)
+
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+
+#define _GLOBAL(name) \
+ .align 2 ; \
+ .type name,@function; \
+ .globl name; \
+name:
+
+#define _GLOBAL_TOC(name) \
+ .align 2 ; \
+ .type name,@function; \
+ .globl name; \
+name: \
+0: addis r2,r12,(.TOC.-0b)@ha; \
+ addi r2,r2,(.TOC.-0b)@l; \
+ .localentry name,.-name
+
+#define DOTSYM(a) a
+
+#else
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .pushsection ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .popsection; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define DOTSYM(a) GLUE(.,a)
+
+#endif
+
+#else /* 32-bit */
+
+#define _GLOBAL(n) \
+ .globl n; \
+n:
+
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define DOTSYM(a) a
+
+#endif
+
+/*
+ * __kprobes (the C annotation) puts the symbol into the .kprobes.text
+ * section, which gets emitted at the end of regular text.
+ *
+ * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
+ * a blacklist. The former is for core kprobe functions/data, the
+ * latter is for those that incidentally must be excluded from probing
+ * and allows them to be linked at a more optimal location within text.
+ */
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE_SYMBOL(entry) \
+ .pushsection "_kprobe_blacklist","aw"; \
+ PPC_LONG (entry) ; \
+ .popsection
+#else
+#define _ASM_NOKPROBE_SYMBOL(entry)
+#endif
+
+#define FUNC_START(name) _GLOBAL(name)
+#define FUNC_END(name)
+
+/*
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ * Loads the value of the constant expression 'expr' into register 'rn'
+ * using immediate instructions only. Use this when it's important not
+ * to reference other data (i.e. on ppc64 when the TOC pointer is not
+ * valid) and when 'expr' is a constant or absolute address.
+ *
+ * LOAD_REG_ADDR(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * you don't particularly need immediate instructions only, but you need
+ * the whole address in one register (e.g. it's a structure address and
+ * you want to access various offsets within it). On ppc32 this is
+ * identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDR_PIC(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * the kernel doesn't run at the linked or relocated address. Please
+ * note that this macro will clobber the lr register.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ * register 'rn'. ADDROFF(name) returns the remainder of the address as
+ * a constant expression. ADDROFF(name) is a signed expression < 16 bits
+ * in size, so is suitable for use directly as an offset in load and store
+ * instructions. Use this when loading/storing a single word or less as:
+ * LOAD_REG_ADDRBASE(rX, name)
+ * ld rY,ADDROFF(name)(rX)
+ */
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name) \
+ bcl 20,31,$+4; \
+0: mflr reg; \
+ addis reg,reg,(name - 0b)@ha; \
+ addi reg,reg,(name - 0b)@l;
+
+#if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
+
+.macro __LOAD_REG_IMMEDIATE_32 r, x
+ .if (\x) >= 0x8000 || (\x) < -0x8000
+ lis \r, (\x)@__AS_ATHIGH
+ .if (\x) & 0xffff != 0
+ ori \r, \r, (\x)@l
+ .endif
+ .else
+ li \r, (\x)@l
+ .endif
+.endm
+
+.macro __LOAD_REG_IMMEDIATE r, x
+ .if (\x) >= 0x80000000 || (\x) < -0x80000000
+ __LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
+ sldi \r, \r, 32
+ .if (\x) & 0xffff0000 != 0
+ oris \r, \r, (\x)@__AS_ATHIGH
+ .endif
+ .if (\x) & 0xffff != 0
+ ori \r, \r, (\x)@l
+ .endif
+ .else
+ __LOAD_REG_IMMEDIATE_32 \r, \x
+ .endif
+.endm
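+/*
+ * Illustrative expansion: for a 64-bit constant such as 0x100002000,
+ * the macro above emits roughly
+ *	li	r, 1		(high 32 bits)
+ *	sldi	r, r, 32
+ *	ori	r, r, 0x2000	(low halfword; the oris step is skipped
+ *				 because bits 16-31 are zero)
+ */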
+
+#ifdef __powerpc64__
+
+#define __LOAD_PACA_TOC(reg) \
+ ld reg,PACATOC(r13)
+
+#define LOAD_PACA_TOC() \
+ __LOAD_PACA_TOC(r2)
+
+#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
+
+#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \
+ lis tmp, (expr)@highest; \
+ lis reg, (expr)@__AS_ATHIGH; \
+ ori tmp, tmp, (expr)@higher; \
+ ori reg, reg, (expr)@l; \
+ rldimi reg, tmp, 32, 0
+
+#define LOAD_REG_ADDR(reg,name) \
+ addis reg,r2,name@toc@ha; \
+ addi reg,reg,name@toc@l
+
+#ifdef CONFIG_PPC_BOOK3E_64
+/*
+ * This is used in register-constrained interrupt handlers. Not to be used
+ * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2
+ * is not used for the TOC offset, so use @got(tocreg). If the interrupt
+ * handlers saved r2 instead, LOAD_REG_ADDR could be used.
+ */
+#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \
+ ld reg,name@got(tocreg)
+#endif
+
+#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name) 0
+
+/* offsets for stack frame layout */
+#define LRSAVE 16
+
+#else /* 32-bit */
+
+#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
+
+#define LOAD_REG_IMMEDIATE_SYM(reg,expr) \
+ lis reg,(expr)@ha; \
+ addi reg,reg,(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE_SYM(reg, name)
+
+#define LOAD_REG_ADDRBASE(reg, name) lis reg,name@ha
+#define ADDROFF(name) name@l
+
+/* offsets for stack frame layout */
+#define LRSAVE 4
+
+#endif
+
+/* various errata or part fixups */
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
+#define MFTB(dest) \
+90: mfspr dest, SPRN_TBRL; \
+BEGIN_FTR_SECTION_NESTED(96); \
+ cmpwi dest,0; \
+ beq- 90b; \
+END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#else
+#define MFTB(dest) MFTBL(dest)
+#endif
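+/*
+ * The Cell/E500 variant above re-reads the timebase until it sees a
+ * non-zero value, working around the Cell timebase bug
+ * (CPU_FTR_CELL_TB_BUG) where a read can transiently return 0.
+ */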
+
+#ifdef CONFIG_PPC_8xx
+#define MFTBL(dest) mftb dest
+#define MFTBU(dest) mftbu dest
+#else
+#define MFTBL(dest) mfspr dest, SPRN_TBRL
+#define MFTBU(dest) mfspr dest, SPRN_TBRU
+#endif
+
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else
+#define TLBSYNC tlbsync; sync
+#endif
+
+#ifdef CONFIG_PPC64
+#define MTOCRF(FXM, RS) \
+ BEGIN_FTR_SECTION_NESTED(848); \
+ mtcrf (FXM), RS; \
+ FTR_SECTION_ELSE_NESTED(848); \
+ mtocrf (FXM), RS; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+#endif
+
+/*
+ * This instruction is not implemented on the PPC 603 or 601; however, on
+ * the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx, they have magical powers,
+ * and they must be used.
+ */
+
+#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
+#define tlbia \
+ li r4,1024; \
+ mtctr r4; \
+ lis r4,KERNELBASE@h; \
+ .machine push; \
+ .machine "power4"; \
+0: tlbie r4; \
+ .machine pop; \
+ addi r4,r4,0x1000; \
+ bdnz 0b
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so this must only be used for server.
+ */
+#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch) \
+ lis scratch,0x60000000@h; \
+ dcbt 0,scratch,0b01010
+
+/*
+ * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
+ * keep the address intact to be compatible with code shared with
+ * 32-bit classic.
+ *
+ * On the other hand, I find it useful to have them behave as expected
+ * by their name (ie always do the addition) on 64-bit BookE
+ */
+#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
+#define toreal(rd)
+#define fromreal(rd)
+
+/*
+ * We use addis to ensure compatibility with the "classic" ppc versions of
+ * these macros, which use rs = 0 to get the tophys offset in rd, rather than
+ * converting the address in r0, and so this version has to do that too
+ * (i.e. set register rd to 0 when rs == 0).
+ */
+#define tophys(rd,rs) \
+ addis rd,rs,0
+
+#define tovirt(rd,rs) \
+ addis rd,rs,0
+
+#elif defined(CONFIG_PPC64)
+#define toreal(rd) /* we can access c000... in real mode */
+#define fromreal(rd)
+
+#define tophys(rd,rs) \
+ clrldi rd,rs,2
+
+#define tovirt(rd,rs) \
+ rotldi rd,rs,16; \
+ ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
+ rotldi rd,rd,48
+#else
+#define toreal(rd) tophys(rd,rd)
+#define fromreal(rd) tovirt(rd,rd)
+
+#define tophys(rd, rs) addis rd, rs, -PAGE_OFFSET@h
+#define tovirt(rd, rs) addis rd, rs, PAGE_OFFSET@h
+#endif
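+/*
+ * Illustrative effect on 64-bit: tophys() clears the top two address
+ * bits, so a linear-map address such as 0xc000000000001000 becomes the
+ * physical address 0x0000000000001000, and tovirt() rotates the
+ * KERNELBASE top bits back in.
+ */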
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define MTMSRD(r) mtmsrd r
+#define MTMSR_EERI(reg) mtmsrd reg,1
+#else
+#define MTMSRD(r) mtmsr r
+#define MTMSR_EERI(reg) mtmsr reg
+#endif
+
+#endif /* __KERNEL__ */
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/*
+ * General Purpose Registers (GPRs)
+ *
+ * The lower case r0-r31 should be used in preference to the upper
+ * case R0-R31 as they provide more error checking in the assembler.
+ * Use R0-R31 only when really necessary.
+ */
+
+#define r0 %r0
+#define r1 %r1
+#define r2 %r2
+#define r3 %r3
+#define r4 %r4
+#define r5 %r5
+#define r6 %r6
+#define r7 %r7
+#define r8 %r8
+#define r9 %r9
+#define r10 %r10
+#define r11 %r11
+#define r12 %r12
+#define r13 %r13
+#define r14 %r14
+#define r15 %r15
+#define r16 %r16
+#define r17 %r17
+#define r18 %r18
+#define r19 %r19
+#define r20 %r20
+#define r21 %r21
+#define r22 %r22
+#define r23 %r23
+#define r24 %r24
+#define r25 %r25
+#define r26 %r26
+#define r27 %r27
+#define r28 %r28
+#define r29 %r29
+#define r30 %r30
+#define r31 %r31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+/* AltiVec Registers (VPRs) */
+
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
+
+/* VSX Registers (VSRs) */
+
+#define vs0 0
+#define vs1 1
+#define vs2 2
+#define vs3 3
+#define vs4 4
+#define vs5 5
+#define vs6 6
+#define vs7 7
+#define vs8 8
+#define vs9 9
+#define vs10 10
+#define vs11 11
+#define vs12 12
+#define vs13 13
+#define vs14 14
+#define vs15 15
+#define vs16 16
+#define vs17 17
+#define vs18 18
+#define vs19 19
+#define vs20 20
+#define vs21 21
+#define vs22 22
+#define vs23 23
+#define vs24 24
+#define vs25 25
+#define vs26 26
+#define vs27 27
+#define vs28 28
+#define vs29 29
+#define vs30 30
+#define vs31 31
+#define vs32 32
+#define vs33 33
+#define vs34 34
+#define vs35 35
+#define vs36 36
+#define vs37 37
+#define vs38 38
+#define vs39 39
+#define vs40 40
+#define vs41 41
+#define vs42 42
+#define vs43 43
+#define vs44 44
+#define vs45 45
+#define vs46 46
+#define vs47 47
+#define vs48 48
+#define vs49 49
+#define vs50 50
+#define vs51 51
+#define vs52 52
+#define vs53 53
+#define vs54 54
+#define vs55 55
+#define vs56 56
+#define vs57 57
+#define vs58 58
+#define vs59 59
+#define vs60 60
+#define vs61 61
+#define vs62 62
+#define vs63 63
+
+/* SPE Registers (EVPRs) */
+
+#define evr0 0
+#define evr1 1
+#define evr2 2
+#define evr3 3
+#define evr4 4
+#define evr5 5
+#define evr6 6
+#define evr7 7
+#define evr8 8
+#define evr9 9
+#define evr10 10
+#define evr11 11
+#define evr12 12
+#define evr13 13
+#define evr14 14
+#define evr15 15
+#define evr16 16
+#define evr17 17
+#define evr18 18
+#define evr19 19
+#define evr20 20
+#define evr21 21
+#define evr22 22
+#define evr23 23
+#define evr24 24
+#define evr25 25
+#define evr26 26
+#define evr27 27
+#define evr28 28
+#define evr29 29
+#define evr30 30
+#define evr31 31
+
+#define RFSCV .long 0x4c0000a4
+
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * The opcode for this instruction read with the wrong endian
+ * however results in a b . + 8
+ *
+ * So essentially we use that trick to execute the following
+ * trampoline in "reverse endian" if we are running with the
+ * MSR_LE bit set the "wrong" way for whatever endianness the
+ * kernel is built for.
+ */
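+
+/*
+ * Typical (illustrative) use: place FIXUP_ENDIAN as the very first thing
+ * executed at an entry point that may be reached in either endianness;
+ * once it completes, MSR_LE matches the endianness the kernel was built
+ * for. MSR[EE] must be off, per the notes below.
+ */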
+
+#ifdef CONFIG_PPC_BOOK3E_64
+#define FIXUP_ENDIAN
+#else
+/*
+ * This version may be used in HV or non-HV context.
+ * MSR[EE] must be disabled.
+ */
+#define FIXUP_ENDIAN \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b 191f; /* Skip trampoline if endian is good */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0x00004039; /* li r10,0 */ \
+ .long 0x6401417d; /* mtmsrd r10,1 */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x14004a39; /* addi r10,r10,20 */ \
+ .long 0xa6035a7d; /* mtsrr0 r10 */ \
+ .long 0xa6037b7d; /* mtsrr1 r11 */ \
+ .long 0x2400004c; /* rfid */ \
+191:
+
+/*
+ * This version may only be used with MSR[HV]=1
+ * - Does not clear MSR[RI], so more robust.
+ * - Slightly smaller and faster.
+ */
+#define FIXUP_ENDIAN_HV \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b 191f; /* Skip trampoline if endian is good */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x14004a39; /* addi r10,r10,20 */ \
+ .long 0xa64b5a7d; /* mthsrr0 r10 */ \
+ .long 0xa64b7b7d; /* mthsrr1 r11 */ \
+ .long 0x2402004c; /* hrfid */ \
+191:
+
+#endif /* !CONFIG_PPC_BOOK3E_64 */
+
+#endif /* __ASSEMBLY__ */
+
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
+#ifdef CONFIG_PPC_E500
+#define BTB_FLUSH(reg) \
+ lis reg,BUCSR_INIT@h; \
+ ori reg,reg,BUCSR_INIT@l; \
+ mtspr SPRN_BUCSR,reg; \
+ isync;
+#else
+#define BTB_FLUSH(reg)
+#endif /* CONFIG_PPC_E500 */
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
new file mode 100644
index 000000000..e77a2ed7d
--- /dev/null
+++ b/arch/powerpc/include/asm/probes.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PROBES_H
+#define _ASM_POWERPC_PROBES_H
+#ifdef __KERNEL__
+/*
+ * Definitions common to probes files
+ *
+ * Copyright IBM Corporation, 2012
+ */
+#include <linux/types.h>
+#include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
+
+#define BREAKPOINT_INSTRUCTION PPC_RAW_TRAP() /* trap */
+
+/* Trap definitions per ISA */
+#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
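+/*
+ * The 0xfc0007fe mask keeps the primary opcode and the extended opcode
+ * while ignoring the TO/RA/RB fields and the Rc bit, so every "tw" form
+ * (primary opcode 31, extended opcode 4) matches 0x7c000008. The
+ * 0xfc000000 mask keeps just the primary opcode for the immediate forms
+ * (tdi is opcode 2, twi is opcode 3).
+ */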
+
+#ifdef CONFIG_PPC64
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+#else
+#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#define MSR_SINGLESTEP (MSR_DE)
+#else
+#define MSR_SINGLESTEP (MSR_SE)
+#endif
+
+static inline bool can_single_step(u32 inst)
+{
+ switch (get_op(inst)) {
+ case OP_TRAP_64: return false;
+ case OP_TRAP: return false;
+ case OP_SC: return false;
+ case OP_19:
+ switch (get_xop(inst)) {
+ case OP_19_XOP_RFID: return false;
+ case OP_19_XOP_RFMCI: return false;
+ case OP_19_XOP_RFDI: return false;
+ case OP_19_XOP_RFI: return false;
+ case OP_19_XOP_RFCI: return false;
+ case OP_19_XOP_RFSCV: return false;
+ case OP_19_XOP_HRFID: return false;
+ case OP_19_XOP_URFID: return false;
+ case OP_19_XOP_STOP: return false;
+ case OP_19_XOP_DOZE: return false;
+ case OP_19_XOP_NAP: return false;
+ case OP_19_XOP_SLEEP: return false;
+ case OP_19_XOP_RVWINKLE: return false;
+ }
+ break;
+ case OP_31:
+ switch (get_xop(inst)) {
+ case OP_31_XOP_TRAP: return false;
+ case OP_31_XOP_TRAP_64: return false;
+ case OP_31_XOP_MTMSR: return false;
+ case OP_31_XOP_MTMSRD: return false;
+ }
+ break;
+ }
+ return true;
+}
+
+/* Enable single stepping for the current task */
+static inline void enable_single_step(struct pt_regs *regs)
+{
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ /*
+	 * We turn off Critical Input Exception (CE) to ensure that the single
+ * step will be for the instruction we have the probe on; if we don't,
+ * it is possible we'd get the single step reported for CE.
+ */
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
+#endif
+}
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PROBES_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
new file mode 100644
index 000000000..631802999
--- /dev/null
+++ b/arch/powerpc/include/asm/processor.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ */
+
+#include <vdso/processor.h>
+
+#include <asm/reg.h>
+
+#ifdef CONFIG_VSX
+#define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
+#else
+#define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
+#endif
+
+#ifdef CONFIG_PPC64
+/* Default SMT priority is set to 3. Use bits 11-13 to save the priority. */
+#define PPR_PRIORITY 3
+#ifdef __ASSEMBLY__
+#define DEFAULT_PPR (PPR_PRIORITY << 50)
+#else
+#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+#include <asm/hw_breakpoint.h>
+
+/* We do _not_ want to define new machine types at all, those must die
+ * in favor of using the device-tree
+ * -- BenH.
+ */
+
+/* PREP sub-platform types. Unused */
+#define _PREP_Motorola 0x01 /* motorola prep */
+#define _PREP_Firm 0x02 /* firmworks prep */
+#define _PREP_IBM 0x00 /* ibm prep */
+#define _PREP_Bull 0x03 /* bull prep */
+
+/* CHRP sub-platform types. These are arbitrary */
+#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
+#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
+#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
+#define _CHRP_briq 0x07 /* TotalImpact's briQ */
+
+#if defined(__KERNEL__) && defined(CONFIG_PPC32)
+
+extern int _chrp_type;
+
+#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC64
+#include <asm/task_size_64.h>
+#else
+#include <asm/task_size_32.h>
+#endif
+
+struct task_struct;
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
+
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+ u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+ u64 fpscr; /* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+ vector128 vr[32] __attribute__((aligned(16)));
+ vector128 vscr __attribute__((aligned(16)));
+};
+
+struct debug_reg {
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ /*
+ * The following help to manage the use of Debug Control Registers
+	 * on the BookE platforms.
+ */
+ uint32_t dbcr0;
+ uint32_t dbcr1;
+#ifdef CONFIG_BOOKE
+ uint32_t dbcr2;
+#endif
+	/*
+	 * The stored value of the DBSR register will be the value at the
+	 * last debug interrupt. This register can only be read by the
+	 * user (it is never written to by the user) and its value helps
+	 * describe the reason for the last debug trap.
+	 */
+ uint32_t dbsr;
+ /*
+ * The following will contain addresses used by debug applications
+ * to help trace and trap on particular address locations.
+ * The bits in the Debug Control Registers above help define which
+ * of the following registers will contain valid data and/or addresses.
+ */
+ unsigned long iac1;
+ unsigned long iac2;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ unsigned long iac3;
+ unsigned long iac4;
+#endif
+ unsigned long dac1;
+ unsigned long dac2;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ unsigned long dvc1;
+ unsigned long dvc2;
+#endif
+#endif
+};
+
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
+
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
+ struct pt_regs *regs; /* Pointer to saved register state */
+#ifdef CONFIG_BOOKE
+ /* BookE base exception scratch space; align on cacheline */
+ unsigned long normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+ void *pgdir; /* root of page-table tree */
+#ifdef CONFIG_PPC_RTAS
+ unsigned long rtas_sp; /* stack pointer for when in RTAS */
+#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ unsigned long kuap; /* opened segments for user access */
+#endif
+ unsigned long srr0;
+ unsigned long srr1;
+ unsigned long dar;
+ unsigned long dsisr;
+#ifdef CONFIG_PPC_BOOK3S_32
+ unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
+ unsigned long lr, ctr;
+ unsigned long sr0;
+#endif
+#endif /* CONFIG_PPC32 */
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ unsigned long pid; /* value written in PID reg. at interrupt exit */
+#endif
+ /* Debug Registers */
+ struct debug_reg debug;
+#ifdef CONFIG_PPC_FPU_REGS
+ struct thread_fp_state fp_state;
+ struct thread_fp_state *fp_save_area;
+#endif
+ int fpexc_mode; /* floating-point exception mode */
+ unsigned int align_ctl; /* alignment handling control */
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bps[HBP_NUM_MAX];
+ /*
+ * Helps identify source of single-step exception and subsequent
+ * hw-breakpoint enablement
+ */
+ struct perf_event *last_hit_ubp[HBP_NUM_MAX];
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
+ unsigned long trap_nr; /* last trap # on this thread */
+ u8 load_slb; /* Ages out SLB preload cache entries */
+ u8 load_fp;
+#ifdef CONFIG_ALTIVEC
+ u8 load_vec;
+ struct thread_vr_state vr_state;
+ struct thread_vr_state *vr_save_area;
+ unsigned long vrsave;
+ int used_vr; /* set if process has used altivec */
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ /* VSR status */
+ int used_vsr; /* set if process has used VSX */
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+ struct_group(spe,
+ unsigned long evr[32]; /* upper 32-bits of SPE regs */
+ u64 acc; /* Accumulator */
+ );
+ unsigned long spefscr; /* SPE & eFP status */
+ unsigned long spefscr_last; /* SPEFSCR value on last prctl
+ call or trap return */
+ int used_spe; /* set if process has used spe */
+#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u8 load_tm;
+ u64 tm_tfhar; /* Transaction fail handler addr */
+ u64 tm_texasr; /* Transaction exception & summary */
+ u64 tm_tfiar; /* Transaction fail instr address reg */
+ struct pt_regs ckpt_regs; /* Checkpointed registers */
+
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+ unsigned long tm_amr;
+
+ /*
+ * Checkpointed FP and VSX 0-31 register set.
+ *
+	 * When a transaction is active/signalled/scheduled etc., *regs is the
+	 * most recent set of (speculated) GPRs, with ckpt_regs being the older
+	 * checkpointed regs to which we roll back if the transaction aborts.
+	 *
+	 * These are analogous to how ckpt_regs and pt_regs work.
+ */
+ struct thread_fp_state ckfp_state; /* Checkpointed FP state */
+ struct thread_vr_state ckvr_state; /* Checkpointed VR state */
+ unsigned long ckvrsave; /* Checkpointed VRSAVE */
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+ void* kvm_shadow_vcpu; /* KVM internal data */
+#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+ struct kvm_vcpu *kvm_vcpu;
+#endif
+#ifdef CONFIG_PPC64
+ unsigned long dscr;
+ unsigned long fscr;
+	/*
+	 * dscr_inherit indicates that the process has explicitly changed
+	 * the DSCR register value for itself. Once set, the kernel no
+	 * longer uses the default CPU DSCR value from the PACA structure
+	 * during context switch, and this behaviour is inherited by all
+	 * children of the process from that point onwards.
+	 */
+ int dscr_inherit;
+ unsigned long tidr;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long tar;
+ unsigned long ebbrr;
+ unsigned long ebbhr;
+ unsigned long bescr;
+ unsigned long siar;
+ unsigned long sdar;
+ unsigned long sier;
+ unsigned long mmcr2;
+ unsigned mmcr0;
+
+ unsigned used_ebb;
+ unsigned long mmcr3;
+ unsigned long sier2;
+ unsigned long sier3;
+
+#endif
+};
+
+#define ARCH_MIN_TASKALIGN 16
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT ((unsigned long)&init_stack)
+
+#ifdef CONFIG_SPE
+#define SPEFSCR_INIT \
+ .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+ .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#else
+#define SPEFSCR_INIT
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SR0_INIT .sr0 = IS_ENABLED(CONFIG_PPC_KUEP) ? SR_NX : 0,
+#else
+#define SR0_INIT
+#endif
+
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+ SR0_INIT \
+}
+#elif defined(CONFIG_PPC32)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+ SR0_INIT \
+}
+#else
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .fpexc_mode = 0, \
+}
+#endif
+
+#define task_pt_regs(tsk) ((tsk)->thread.regs)
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+
+/* Get/set floating-point exception mode */
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
+
+extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
+extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
+
+#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
+#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
+
+extern int get_endian(struct task_struct *tsk, unsigned long adr);
+extern int set_endian(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
+{
+ return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
+}
+
+static inline unsigned long __pack_fe01(unsigned int fpmode)
+{
+ return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
+}
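+/*
+ * Example: with MSR_FE0 = 1UL << 11 and MSR_FE1 = 1UL << 8, the two MSR
+ * bits pack into a 2-bit mode, so __unpack_fe01(MSR_FE0 | MSR_FE1)
+ * returns 3 and __pack_fe01(3) returns MSR_FE0 | MSR_FE1.
+ */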
+
+#ifdef CONFIG_PPC64
+
+#define spin_begin() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 1,1,1", /* HMT_LOW */ \
+ "nop", /* v3.1 uses pause_short in cpu_relax instead */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
+
+#define spin_cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "nop", /* Before v3.1 use priority nops in spin_begin/end */ \
+ PPC_WAIT(2, 0), /* aka pause_short */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
+
+#define spin_end() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 2,2,2", /* HMT_MEDIUM */ \
+ "nop", \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
+
+#endif
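+/*
+ * Minimal busy-wait sketch using the helpers above (the flag pointer is
+ * illustrative):
+ *
+ *	spin_begin();
+ *	while (!READ_ONCE(*flag))
+ *		spin_cpu_relax();
+ *	spin_end();
+ */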
+
+/* Check that a certain kernel stack pointer is valid in task_struct p */
+int validate_sp(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes);
+
+/*
+ * Prefetch macros.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+/* asm stubs */
+extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
+extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
+extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+#ifdef CONFIG_PPC_970_NAP
+extern void power4_idle_nap(void);
+void power4_idle_nap_return(void);
+#endif
+
+extern unsigned long cpuidle_disable;
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
+
+extern void power7_idle_type(unsigned long type);
+extern void arch300_idle_type(unsigned long stop_psscr_val,
+ unsigned long stop_psscr_mask);
+void pnv_power9_force_smt4_catch(void);
+void pnv_power9_force_smt4_release(void);
+
+extern int fix_alignment(struct pt_regs *);
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+#endif
+
+int do_mathemu(struct pt_regs *regs);
+int do_spe_mathemu(struct pt_regs *regs);
+int speround_handler(struct pt_regs *regs);
+
+/* VMX copying */
+int enter_vmx_usercopy(void);
+int exit_vmx_usercopy(void);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
new file mode 100644
index 000000000..2e82820fb
--- /dev/null
+++ b/arch/powerpc/include/asm/prom.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _POWERPC_PROM_H
+#define _POWERPC_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ */
+#include <linux/types.h>
+#include <asm/firmware.h>
+
+struct device_node;
+struct property;
+
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names; it is fully page aligned and
+ * self-contained in a page, so that it can be kept around by the kernel.
+ * Each property name appears only once in this page (cheap compression).
+ *
+ * The mem_rsvmap contains a map of reserved ranges of physical memory;
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0.
+ */
+struct boot_param_header {
+ __be32 magic; /* magic word OF_DT_HEADER */
+ __be32 totalsize; /* total size of DT block */
+ __be32 off_dt_struct; /* offset to structure */
+ __be32 off_dt_strings; /* offset to strings */
+ __be32 off_mem_rsvmap; /* offset to memory reserve map */
+ __be32 version; /* format version */
+ __be32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ __be32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ __be32 dt_struct_size; /* size of the DT structure block */
+};
+
+/*
+ * OF address retrieval & translation
+ */
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+ unsigned long *busno, unsigned long *phys,
+ unsigned long *size);
+
+extern void of_instantiate_rtc(void);
+
+extern int of_get_ibm_chip_id(struct device_node *np);
+
+struct of_drc_info {
+ char *drc_type;
+ char *drc_name_prefix;
+ u32 drc_index_start;
+ u32 drc_name_suffix_start;
+ u32 num_sequential_elems;
+ u32 sequential_inc;
+ u32 drc_power_domain;
+ u32 last_drc_index;
+};
+
+extern int of_read_drc_info_cell(struct property **prop,
+ const __be32 **curval, struct of_drc_info *data);
+
+
+/*
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node. For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections that contain
+ * structures that contain various information.
+ */
+
+/* New method - extensible architecture description vector. */
+
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE 0x80 /* ignore this vector */
+#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
+
+#define OV1_PPC_3_00 0x80 /* set if we support PowerPC 3.00 */
+#define OV1_PPC_3_1 0x40 /* set if we support PowerPC 3.1 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP 0x80 /* floating point */
+#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported
+ * These bits are also used in firmware_has_feature() to validate
+ * the capabilities reported for vector 5 in the device tree so we
+ * encode the vector index in the define and use the OV5_FEAT()
+ * and OV5_INDX() macros to extract the desired information.
+ */
+#define OV5_FEAT(x) ((x) & 0xff)
+#define OV5_INDX(x) ((x) >> 8)
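+/*
+ * Example decode: OV5_LPAR is 0x0280, so OV5_INDX(OV5_LPAR) == 2 (byte 2
+ * of option vector 5) and OV5_FEAT(OV5_LPAR) == 0x80 (the bit within
+ * that byte).
+ */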
+#define OV5_LPAR 0x0280 /* logical partitioning supported */
+#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY 0x0220
+#define OV5_LARGE_PAGES 0x0210 /* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */
+#define OV5_MSI 0x0201 /* PCIe/MSI support */
+#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x0440 /* Page Coalescing */
+#define OV5_FORM1_AFFINITY 0x0580 /* FORM1 NUMA affinity */
+#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
+#define OV5_FORM2_AFFINITY 0x0520 /* Form2 NUMA affinity */
+#define OV5_HP_EVT 0x0604 /* Hot Plug Event support */
+#define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */
+#define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
+#define OV5_DRMEM_V2 0x1680 /* ibm,dynamic-reconfiguration-v2 */
+#define OV5_XIVE_SUPPORT 0x17C0 /* XIVE Exploitation Support Mask */
+#define OV5_XIVE_LEGACY 0x1700 /* XIVE legacy mode Only */
+#define OV5_XIVE_EXPLOIT 0x1740 /* XIVE exploitation mode Only */
+#define OV5_XIVE_EITHER 0x1780 /* XIVE legacy or exploitation mode */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
+#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
+#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
+#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
+#define OV5_NMMU 0x1820 /* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
+#define OV5_DRC_INFO 0x1640 /* Redef Prop Structures: drc-info */
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX 0x02 /* Linux is our OS */
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
new file mode 100644
index 000000000..8a0d8fb35
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PS3 platform declarations.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ */
+
+#if !defined(_ASM_POWERPC_PS3_H)
+#define _ASM_POWERPC_PS3_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/cell-pmu.h>
+
+union ps3_firmware_version {
+ u64 raw;
+ struct {
+ u16 pad;
+ u16 major;
+ u16 minor;
+ u16 rev;
+ };
+};
+
+void ps3_get_firmware_version(union ps3_firmware_version *v);
+int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev);
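+/*
+ * Illustrative guard, assuming a memcmp-style result comparing the
+ * running firmware against the given triple:
+ *
+ *	if (ps3_compare_firmware_version(1, 9, 0) >= 0)
+ *		enable_newer_feature();	(hypothetical helper)
+ */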
+
+/* 'Other OS' area */
+
+enum ps3_param_av_multi_out {
+ PS3_PARAM_AV_MULTI_OUT_NTSC = 0,
+ PS3_PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+ PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+ PS3_PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
+
+extern u64 ps3_os_area_get_rtc_diff(void);
+extern void ps3_os_area_set_rtc_diff(u64 rtc_diff);
+
+struct ps3_os_area_flash_ops {
+ ssize_t (*read)(void *buf, size_t count, loff_t pos);
+ ssize_t (*write)(const void *buf, size_t count, loff_t pos);
+};
+
+extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops);
+
+/* dma routines */
+
+enum ps3_dma_page_size {
+ PS3_DMA_4K = 12U,
+ PS3_DMA_64K = 16U,
+ PS3_DMA_1M = 20U,
+ PS3_DMA_16M = 24U,
+};
+
+enum ps3_dma_region_type {
+ PS3_DMA_OTHER = 0,
+ PS3_DMA_INTERNAL = 2,
+};
+
+struct ps3_dma_region_ops;
+
+/**
+ * struct ps3_dma_region - A per-device DMA state structure
+ * @did: The HV device id.
+ * @page_size: The ioc pagesize.
+ * @region_type: The HV region type.
+ * @bus_addr: The 'translated' bus address of the region.
+ * @len: The length in bytes of the region.
+ * @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
+ * @ioid: The IOID of the device that owns this region.
+ * @chunk_list: Opaque variable used by the ioc page manager.
+ * @region_ops: struct ps3_dma_region_ops - dma region operations
+ */
+
+struct ps3_dma_region {
+ struct ps3_system_bus_device *dev;
+ /* device variables */
+ const struct ps3_dma_region_ops *region_ops;
+ unsigned char ioid;
+ enum ps3_dma_page_size page_size;
+ enum ps3_dma_region_type region_type;
+ unsigned long len;
+ unsigned long offset;
+ u64 dma_mask;
+
+ /* driver variables (set by ps3_dma_region_create) */
+ unsigned long bus_addr;
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } chunk_list;
+};
+
+struct ps3_dma_region_ops {
+ int (*create)(struct ps3_dma_region *);
+ int (*free)(struct ps3_dma_region *);
+ int (*map)(struct ps3_dma_region *,
+ unsigned long virt_addr,
+ unsigned long len,
+ dma_addr_t *bus_addr,
+ u64 iopte_pp);
+ int (*unmap)(struct ps3_dma_region *,
+ dma_addr_t bus_addr,
+ unsigned long len);
+};
+/**
+ * ps3_dma_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+struct ps3_system_bus_device;
+
+int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+ struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
+ enum ps3_dma_region_type region_type, void *addr, unsigned long len);
+int ps3_dma_region_create(struct ps3_dma_region *r);
+int ps3_dma_region_free(struct ps3_dma_region *r);
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+ unsigned long len, dma_addr_t *bus_addr,
+ u64 iopte_pp);
+int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
+ unsigned long len);
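+/*
+ * Illustrative call order for the routines above (the region setup
+ * arguments are hypothetical):
+ *
+ *	ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_INTERNAL, NULL, 0);
+ *	ps3_dma_region_create(&r);
+ *	ps3_dma_map(&r, virt_addr, len, &bus_addr, iopte_pp);
+ *	...
+ *	ps3_dma_unmap(&r, bus_addr, len);
+ *	ps3_dma_region_free(&r);
+ */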
+
+/* mmio routines */
+
+enum ps3_mmio_page_size {
+ PS3_MMIO_4K = 12U,
+ PS3_MMIO_64K = 16U
+};
+
+struct ps3_mmio_region_ops;
+/**
+ * struct ps3_mmio_region - A per-device MMIO state structure
+ *
+ * Current systems can be supported with a single region per device.
+ */
+
+struct ps3_mmio_region {
+ struct ps3_system_bus_device *dev;
+ const struct ps3_mmio_region_ops *mmio_ops;
+ unsigned long bus_addr;
+ unsigned long len;
+ enum ps3_mmio_page_size page_size;
+ unsigned long lpar_addr;
+};
+
+struct ps3_mmio_region_ops {
+ int (*create)(struct ps3_mmio_region *);
+ int (*free)(struct ps3_mmio_region *);
+};
+/**
+ * ps3_mmio_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
+ struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
+ enum ps3_mmio_page_size page_size);
+int ps3_mmio_region_create(struct ps3_mmio_region *r);
+int ps3_free_mmio_region(struct ps3_mmio_region *r);
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
+
+/* interrupt routines */
+
+enum ps3_cpu_binding {
+ PS3_BINDING_CPU_ANY = -1,
+ PS3_BINDING_CPU_0 = 0,
+ PS3_BINDING_CPU_1 = 1,
+};
+
+int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
+ unsigned int *virq);
+int ps3_irq_plug_destroy(unsigned int virq);
+int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_event_receive_port_destroy(unsigned int virq);
+int ps3_send_event_locally(unsigned int virq);
+
+int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
+ unsigned int *virq);
+int ps3_io_irq_destroy(unsigned int virq);
+int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
+ unsigned int *virq);
+int ps3_vuart_irq_destroy(unsigned int virq);
+int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
+ unsigned int class, unsigned int *virq);
+int ps3_spe_irq_destroy(unsigned int virq);
+
+int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
+ enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
+ unsigned int virq);
+
+/* lv1 result codes */
+
+enum lv1_result {
+ LV1_SUCCESS = 0,
+ /* not used -1 */
+ LV1_RESOURCE_SHORTAGE = -2,
+ LV1_NO_PRIVILEGE = -3,
+ LV1_DENIED_BY_POLICY = -4,
+ LV1_ACCESS_VIOLATION = -5,
+ LV1_NO_ENTRY = -6,
+ LV1_DUPLICATE_ENTRY = -7,
+ LV1_TYPE_MISMATCH = -8,
+ LV1_BUSY = -9,
+ LV1_EMPTY = -10,
+ LV1_WRONG_STATE = -11,
+ /* not used -12 */
+ LV1_NO_MATCH = -13,
+ LV1_ALREADY_CONNECTED = -14,
+ LV1_UNSUPPORTED_PARAMETER_VALUE = -15,
+ LV1_CONDITION_NOT_SATISFIED = -16,
+ LV1_ILLEGAL_PARAMETER_VALUE = -17,
+ LV1_BAD_OPTION = -18,
+ LV1_IMPLEMENTATION_LIMITATION = -19,
+ LV1_NOT_IMPLEMENTED = -20,
+ LV1_INVALID_CLASS_ID = -21,
+ LV1_CONSTRAINT_NOT_SATISFIED = -22,
+ LV1_ALIGNMENT_ERROR = -23,
+ LV1_HARDWARE_ERROR = -24,
+ LV1_INVALID_DATA_FORMAT = -25,
+ LV1_INVALID_OPERATION = -26,
+ LV1_INTERNAL_ERROR = -32768,
+};
+
+static inline const char* ps3_result(int result)
+{
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
+ switch (result) {
+ case LV1_SUCCESS:
+ return "LV1_SUCCESS (0)";
+ case -1:
+ return "** unknown result ** (-1)";
+ case LV1_RESOURCE_SHORTAGE:
+ return "LV1_RESOURCE_SHORTAGE (-2)";
+ case LV1_NO_PRIVILEGE:
+ return "LV1_NO_PRIVILEGE (-3)";
+ case LV1_DENIED_BY_POLICY:
+ return "LV1_DENIED_BY_POLICY (-4)";
+ case LV1_ACCESS_VIOLATION:
+ return "LV1_ACCESS_VIOLATION (-5)";
+ case LV1_NO_ENTRY:
+ return "LV1_NO_ENTRY (-6)";
+ case LV1_DUPLICATE_ENTRY:
+ return "LV1_DUPLICATE_ENTRY (-7)";
+ case LV1_TYPE_MISMATCH:
+ return "LV1_TYPE_MISMATCH (-8)";
+ case LV1_BUSY:
+ return "LV1_BUSY (-9)";
+ case LV1_EMPTY:
+ return "LV1_EMPTY (-10)";
+ case LV1_WRONG_STATE:
+ return "LV1_WRONG_STATE (-11)";
+ case -12:
+ return "** unknown result ** (-12)";
+ case LV1_NO_MATCH:
+ return "LV1_NO_MATCH (-13)";
+ case LV1_ALREADY_CONNECTED:
+ return "LV1_ALREADY_CONNECTED (-14)";
+ case LV1_UNSUPPORTED_PARAMETER_VALUE:
+ return "LV1_UNSUPPORTED_PARAMETER_VALUE (-15)";
+ case LV1_CONDITION_NOT_SATISFIED:
+ return "LV1_CONDITION_NOT_SATISFIED (-16)";
+ case LV1_ILLEGAL_PARAMETER_VALUE:
+ return "LV1_ILLEGAL_PARAMETER_VALUE (-17)";
+ case LV1_BAD_OPTION:
+ return "LV1_BAD_OPTION (-18)";
+ case LV1_IMPLEMENTATION_LIMITATION:
+ return "LV1_IMPLEMENTATION_LIMITATION (-19)";
+ case LV1_NOT_IMPLEMENTED:
+ return "LV1_NOT_IMPLEMENTED (-20)";
+ case LV1_INVALID_CLASS_ID:
+ return "LV1_INVALID_CLASS_ID (-21)";
+ case LV1_CONSTRAINT_NOT_SATISFIED:
+ return "LV1_CONSTRAINT_NOT_SATISFIED (-22)";
+ case LV1_ALIGNMENT_ERROR:
+ return "LV1_ALIGNMENT_ERROR (-23)";
+ case LV1_HARDWARE_ERROR:
+ return "LV1_HARDWARE_ERROR (-24)";
+ case LV1_INVALID_DATA_FORMAT:
+ return "LV1_INVALID_DATA_FORMAT (-25)";
+ case LV1_INVALID_OPERATION:
+ return "LV1_INVALID_OPERATION (-26)";
+ case LV1_INTERNAL_ERROR:
+ return "LV1_INTERNAL_ERROR (-32768)";
+ default:
+ BUG();
+ return "** unknown result **";
+ };
+#else
+ return "";
+#endif
+}
+
+/* system bus routines */
+
+enum ps3_match_id {
+ PS3_MATCH_ID_EHCI = 1,
+ PS3_MATCH_ID_OHCI = 2,
+ PS3_MATCH_ID_GELIC = 3,
+ PS3_MATCH_ID_AV_SETTINGS = 4,
+ PS3_MATCH_ID_SYSTEM_MANAGER = 5,
+ PS3_MATCH_ID_STOR_DISK = 6,
+ PS3_MATCH_ID_STOR_ROM = 7,
+ PS3_MATCH_ID_STOR_FLASH = 8,
+ PS3_MATCH_ID_SOUND = 9,
+ PS3_MATCH_ID_GPU = 10,
+ PS3_MATCH_ID_LPM = 11,
+};
+
+enum ps3_match_sub_id {
+ PS3_MATCH_SUB_ID_GPU_FB = 1,
+ PS3_MATCH_SUB_ID_GPU_RAMDISK = 2,
+};
+
+#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
+#define PS3_MODULE_ALIAS_OHCI "ps3:2:0"
+#define PS3_MODULE_ALIAS_GELIC "ps3:3:0"
+#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0"
+#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0"
+#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0"
+#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
+#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
+#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
+#define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2"
+#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
+
+enum ps3_system_bus_device_type {
+ PS3_DEVICE_TYPE_IOC0 = 1,
+ PS3_DEVICE_TYPE_SB,
+ PS3_DEVICE_TYPE_VUART,
+ PS3_DEVICE_TYPE_LPM,
+};
+
+/**
+ * struct ps3_system_bus_device - a device on the system bus
+ */
+
+struct ps3_system_bus_device {
+ enum ps3_match_id match_id;
+ enum ps3_match_sub_id match_sub_id;
+ enum ps3_system_bus_device_type dev_type;
+
+ u64 bus_id; /* SB */
+ u64 dev_id; /* SB */
+ unsigned int interrupt_id; /* SB */
+ struct ps3_dma_region *d_region; /* SB, IOC0 */
+	struct ps3_mmio_region *m_region; /* SB, IOC0 */
+ unsigned int port_number; /* VUART */
+ struct { /* LPM */
+ u64 node_id;
+ u64 pu_id;
+ u64 rights;
+ } lpm;
+
+/* struct iommu_table *iommu_table; -- waiting for BenH's cleanups */
+ struct device core;
+ void *driver_priv; /* private driver variables */
+};
+
+int ps3_open_hv_device(struct ps3_system_bus_device *dev);
+int ps3_close_hv_device(struct ps3_system_bus_device *dev);
+
+/**
+ * struct ps3_system_bus_driver - a driver for a device on the system bus
+ */
+
+struct ps3_system_bus_driver {
+ enum ps3_match_id match_id;
+ enum ps3_match_sub_id match_sub_id;
+ struct device_driver core;
+ int (*probe)(struct ps3_system_bus_device *);
+ void (*remove)(struct ps3_system_bus_device *);
+ void (*shutdown)(struct ps3_system_bus_device *);
+/* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+/* int (*resume)(struct ps3_system_bus_device *); */
+};
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
+
+static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv(
+ struct device_driver *_drv)
+{
+ return container_of(_drv, struct ps3_system_bus_driver, core);
+}
+static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev(
+ struct device *_dev)
+{
+ return container_of(_dev, struct ps3_system_bus_device, core);
+}
+static inline struct ps3_system_bus_driver *
+ ps3_system_bus_dev_to_system_bus_drv(struct ps3_system_bus_device *_dev)
+{
+ BUG_ON(!_dev);
+ BUG_ON(!_dev->core.driver);
+ return ps3_drv_to_system_bus_drv(_dev->core.driver);
+}
+
+/**
+ * ps3_system_bus_set_drvdata - Set the driver-private data for a device.
+ * @dev: device structure
+ * @data: Data to set
+ */
+
+static inline void ps3_system_bus_set_drvdata(
+ struct ps3_system_bus_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->core, data);
+}
+static inline void *ps3_system_bus_get_drvdata(
+ struct ps3_system_bus_device *dev)
+{
+ return dev_get_drvdata(&dev->core);
+}
+
+/* These two need global scope for get_arch_dma_ops(). */
+
+extern struct bus_type ps3_system_bus_type;
+
+/* system manager */
+
+struct ps3_sys_manager_ops {
+ struct ps3_system_bus_device *dev;
+ void (*power_off)(struct ps3_system_bus_device *dev);
+ void (*restart)(struct ps3_system_bus_device *dev);
+};
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops);
+void __noreturn ps3_sys_manager_power_off(void);
+void __noreturn ps3_sys_manager_restart(void);
+void __noreturn ps3_sys_manager_halt(void);
+int ps3_sys_manager_get_wol(void);
+void ps3_sys_manager_set_wol(int state);
+
+struct ps3_prealloc {
+ const char *name;
+ void *address;
+ unsigned long size;
+ unsigned long align;
+};
+
+extern struct ps3_prealloc ps3fb_videomemory;
+extern struct ps3_prealloc ps3flash_bounce_buffer;
+
+/* logical performance monitor */
+
+/**
+ * enum ps3_lpm_rights - Rights granted by the system policy module.
+ *
+ * @PS3_LPM_RIGHTS_USE_LPM: The right to use the lpm.
+ * @PS3_LPM_RIGHTS_USE_TB: The right to use the internal trace buffer.
+ */
+
+enum ps3_lpm_rights {
+ PS3_LPM_RIGHTS_USE_LPM = 0x001,
+ PS3_LPM_RIGHTS_USE_TB = 0x100,
+};
+
+/**
+ * enum ps3_lpm_tb_type - Type of trace buffer lv1 should use.
+ *
+ * @PS3_LPM_TB_TYPE_NONE: Do not use a trace buffer.
+ * @PS3_LPM_TB_TYPE_INTERNAL: Use the lv1 internal trace buffer. Requires
+ *  the @PS3_LPM_RIGHTS_USE_TB right.
+ */
+
+enum ps3_lpm_tb_type {
+ PS3_LPM_TB_TYPE_NONE = 0,
+ PS3_LPM_TB_TYPE_INTERNAL = 1,
+};
+
+int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
+ u64 tb_cache_size);
+int ps3_lpm_close(void);
+int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
+ unsigned long *bytes_copied);
+int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
+ unsigned long count, unsigned long *bytes_copied);
+void ps3_set_bookmark(u64 bookmark);
+void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id);
+int ps3_set_signal(u64 rtas_signal_group, u8 signal_bit, u16 sub_unit,
+ u8 bus_word);
+
+u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
+void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+u32 ps3_read_ctr(u32 cpu, u32 ctr);
+void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
+void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
+void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
+void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+void ps3_enable_pm(u32 cpu);
+void ps3_disable_pm(u32 cpu);
+void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+void ps3_disable_pm_interrupts(u32 cpu);
+
+u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
+void ps3_sync_irq(int node);
+u32 ps3_get_hw_thread_id(int cpu);
+u64 ps3_get_spe_id(void *arg);
+
+void ps3_early_mm_init(void);
+
+#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
new file mode 100644
index 000000000..c8b0f2ffc
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -0,0 +1,729 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PS3 AV backend support.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ */
+
+#ifndef _ASM_POWERPC_PS3AV_H_
+#define _ASM_POWERPC_PS3AV_H_
+
+/** command for ioctl() **/
+#define PS3AV_VERSION 0x205 /* version of ps3av command */
+
+#define PS3AV_CID_AV_INIT 0x00000001
+#define PS3AV_CID_AV_FIN 0x00000002
+#define PS3AV_CID_AV_GET_HW_CONF 0x00000003
+#define PS3AV_CID_AV_GET_MONITOR_INFO 0x00000004
+#define PS3AV_CID_AV_ENABLE_EVENT 0x00000006
+#define PS3AV_CID_AV_DISABLE_EVENT 0x00000007
+#define PS3AV_CID_AV_TV_MUTE 0x0000000a
+
+#define PS3AV_CID_AV_VIDEO_CS 0x00010001
+#define PS3AV_CID_AV_VIDEO_MUTE 0x00010002
+#define PS3AV_CID_AV_VIDEO_DISABLE_SIG 0x00010003
+#define PS3AV_CID_AV_AUDIO_PARAM 0x00020001
+#define PS3AV_CID_AV_AUDIO_MUTE 0x00020002
+#define PS3AV_CID_AV_HDMI_MODE 0x00040001
+
+#define PS3AV_CID_VIDEO_INIT 0x01000001
+#define PS3AV_CID_VIDEO_MODE 0x01000002
+#define PS3AV_CID_VIDEO_FORMAT 0x01000004
+#define PS3AV_CID_VIDEO_PITCH 0x01000005
+
+#define PS3AV_CID_AUDIO_INIT 0x02000001
+#define PS3AV_CID_AUDIO_MODE 0x02000002
+#define PS3AV_CID_AUDIO_MUTE 0x02000003
+#define PS3AV_CID_AUDIO_ACTIVE 0x02000004
+#define PS3AV_CID_AUDIO_INACTIVE 0x02000005
+#define PS3AV_CID_AUDIO_SPDIF_BIT 0x02000006
+#define PS3AV_CID_AUDIO_CTRL 0x02000007
+
+#define PS3AV_CID_EVENT_UNPLUGGED 0x10000001
+#define PS3AV_CID_EVENT_PLUGGED 0x10000002
+#define PS3AV_CID_EVENT_HDCP_DONE 0x10000003
+#define PS3AV_CID_EVENT_HDCP_FAIL 0x10000004
+#define PS3AV_CID_EVENT_HDCP_AUTH 0x10000005
+#define PS3AV_CID_EVENT_HDCP_ERROR 0x10000006
+
+#define PS3AV_CID_AVB_PARAM 0x04000001
+
+/* max backend ports */
+#define PS3AV_HDMI_MAX 2 /* HDMI_0 HDMI_1 */
+#define PS3AV_AVMULTI_MAX 1 /* AVMULTI_0 */
+#define PS3AV_AV_PORT_MAX (PS3AV_HDMI_MAX + PS3AV_AVMULTI_MAX)
+#define PS3AV_OPT_PORT_MAX 1 /* SPDIF0 */
+#define PS3AV_HEAD_MAX 2 /* HEAD_A HEAD_B */
+
+/* num of pkt for PS3AV_CID_AVB_PARAM */
+#define PS3AV_AVB_NUM_VIDEO PS3AV_HEAD_MAX
+#define PS3AV_AVB_NUM_AUDIO 0 /* not supported */
+#define PS3AV_AVB_NUM_AV_VIDEO PS3AV_AV_PORT_MAX
+#define PS3AV_AVB_NUM_AV_AUDIO PS3AV_HDMI_MAX
+
+#define PS3AV_MUTE_PORT_MAX 1 /* num of ports in mute pkt */
+
+/* event_bit */
+#define PS3AV_CMD_EVENT_BIT_UNPLUGGED (1 << 0)
+#define PS3AV_CMD_EVENT_BIT_PLUGGED (1 << 1)
+#define PS3AV_CMD_EVENT_BIT_HDCP_DONE (1 << 2)
+#define PS3AV_CMD_EVENT_BIT_HDCP_FAIL (1 << 3)
+#define PS3AV_CMD_EVENT_BIT_HDCP_REAUTH (1 << 4)
+#define PS3AV_CMD_EVENT_BIT_HDCP_TOPOLOGY (1 << 5)
+
+/* common params */
+/* mute */
+#define PS3AV_CMD_MUTE_OFF 0x0000
+#define PS3AV_CMD_MUTE_ON 0x0001
+/* avport */
+#define PS3AV_CMD_AVPORT_HDMI_0 0x0000
+#define PS3AV_CMD_AVPORT_HDMI_1 0x0001
+#define PS3AV_CMD_AVPORT_AVMULTI_0 0x0010
+#define PS3AV_CMD_AVPORT_SPDIF_0 0x0020
+#define PS3AV_CMD_AVPORT_SPDIF_1 0x0021
+
+/* for av backend */
+/* av_mclk */
+#define PS3AV_CMD_AV_MCLK_128 0x0000
+#define PS3AV_CMD_AV_MCLK_256 0x0001
+#define PS3AV_CMD_AV_MCLK_512 0x0003
+/* av_inputlen */
+#define PS3AV_CMD_AV_INPUTLEN_16 0x02
+#define PS3AV_CMD_AV_INPUTLEN_20 0x0a
+#define PS3AV_CMD_AV_INPUTLEN_24 0x0b
+/* av_layout */
+#define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
+#define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
+#define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
+#define PS3AV_CMD_AV_LAYOUT_88 (1 << 3)
+#define PS3AV_CMD_AV_LAYOUT_96 (1 << 4)
+#define PS3AV_CMD_AV_LAYOUT_176 (1 << 5)
+#define PS3AV_CMD_AV_LAYOUT_192 (1 << 6)
+/* hdmi_mode */
+#define PS3AV_CMD_AV_HDMI_MODE_NORMAL 0xff
+#define PS3AV_CMD_AV_HDMI_HDCP_OFF 0x01
+#define PS3AV_CMD_AV_HDMI_EDID_PASS 0x80
+#define PS3AV_CMD_AV_HDMI_DVI 0x40
+
+/* for video module */
+/* video_head */
+#define PS3AV_CMD_VIDEO_HEAD_A 0x0000
+#define PS3AV_CMD_VIDEO_HEAD_B 0x0001
+/* video_cs_out video_cs_in */
+#define PS3AV_CMD_VIDEO_CS_NONE 0x0000
+#define PS3AV_CMD_VIDEO_CS_RGB_8 0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV444_8 0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV422_8 0x0003
+#define PS3AV_CMD_VIDEO_CS_XVYCC_8 0x0004
+#define PS3AV_CMD_VIDEO_CS_RGB_10 0x0005
+#define PS3AV_CMD_VIDEO_CS_YUV444_10 0x0006
+#define PS3AV_CMD_VIDEO_CS_YUV422_10 0x0007
+#define PS3AV_CMD_VIDEO_CS_XVYCC_10 0x0008
+#define PS3AV_CMD_VIDEO_CS_RGB_12 0x0009
+#define PS3AV_CMD_VIDEO_CS_YUV444_12 0x000a
+#define PS3AV_CMD_VIDEO_CS_YUV422_12 0x000b
+#define PS3AV_CMD_VIDEO_CS_XVYCC_12 0x000c
+/* video_vid */
+#define PS3AV_CMD_VIDEO_VID_NONE 0x0000
+#define PS3AV_CMD_VIDEO_VID_480I 0x0001
+#define PS3AV_CMD_VIDEO_VID_576I 0x0003
+#define PS3AV_CMD_VIDEO_VID_480P 0x0005
+#define PS3AV_CMD_VIDEO_VID_576P 0x0006
+#define PS3AV_CMD_VIDEO_VID_1080I_60HZ 0x0007
+#define PS3AV_CMD_VIDEO_VID_1080I_50HZ 0x0008
+#define PS3AV_CMD_VIDEO_VID_720P_60HZ 0x0009
+#define PS3AV_CMD_VIDEO_VID_720P_50HZ 0x000a
+#define PS3AV_CMD_VIDEO_VID_1080P_60HZ 0x000b
+#define PS3AV_CMD_VIDEO_VID_1080P_50HZ 0x000c
+#define PS3AV_CMD_VIDEO_VID_WXGA 0x000d
+#define PS3AV_CMD_VIDEO_VID_SXGA 0x000e
+#define PS3AV_CMD_VIDEO_VID_WUXGA 0x000f
+#define PS3AV_CMD_VIDEO_VID_480I_A 0x0010
+/* video_format */
+#define PS3AV_CMD_VIDEO_FORMAT_BLACK 0x0000
+#define PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT 0x0007
+/* video_order */
+#define PS3AV_CMD_VIDEO_ORDER_RGB 0x0000
+#define PS3AV_CMD_VIDEO_ORDER_BGR 0x0001
+/* video_fmt */
+#define PS3AV_CMD_VIDEO_FMT_X8R8G8B8 0x0000
+/* video_out_format */
+#define PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT 0x0000
+/* video_cl_cnv */
+#define PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT 0x0000
+#define PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT 0x0010
+/* video_sync */
+#define PS3AV_CMD_VIDEO_SYNC_VSYNC 0x0001
+#define PS3AV_CMD_VIDEO_SYNC_CSYNC 0x0004
+#define PS3AV_CMD_VIDEO_SYNC_HSYNC 0x0010
+
+/* for audio module */
+/* num_of_ch */
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_2 0x0000
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_3 0x0001
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_4 0x0002
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_5 0x0003
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_6 0x0004
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_7 0x0005
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_8 0x0006
+/* audio_fs */
+#define PS3AV_CMD_AUDIO_FS_32K 0x0001
+#define PS3AV_CMD_AUDIO_FS_44K 0x0002
+#define PS3AV_CMD_AUDIO_FS_48K 0x0003
+#define PS3AV_CMD_AUDIO_FS_88K 0x0004
+#define PS3AV_CMD_AUDIO_FS_96K 0x0005
+#define PS3AV_CMD_AUDIO_FS_176K 0x0006
+#define PS3AV_CMD_AUDIO_FS_192K 0x0007
+/* audio_word_bits */
+#define PS3AV_CMD_AUDIO_WORD_BITS_16 0x0001
+#define PS3AV_CMD_AUDIO_WORD_BITS_20 0x0002
+#define PS3AV_CMD_AUDIO_WORD_BITS_24 0x0003
+/* audio_format */
+#define PS3AV_CMD_AUDIO_FORMAT_PCM 0x0001
+#define PS3AV_CMD_AUDIO_FORMAT_BITSTREAM 0x00ff
+/* audio_source */
+#define PS3AV_CMD_AUDIO_SOURCE_SERIAL 0x0000
+#define PS3AV_CMD_AUDIO_SOURCE_SPDIF 0x0001
+/* audio_swap */
+#define PS3AV_CMD_AUDIO_SWAP_0 0x0000
+#define PS3AV_CMD_AUDIO_SWAP_1 0x0000
+/* audio_map */
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_0 0x0000
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_1 0x0001
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_2 0x0002
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_3 0x0003
+/* audio_layout */
+#define PS3AV_CMD_AUDIO_LAYOUT_2CH 0x0000
+#define PS3AV_CMD_AUDIO_LAYOUT_6CH 0x000b /* LREClr */
+#define PS3AV_CMD_AUDIO_LAYOUT_8CH 0x001f /* LREClrXY */
+/* audio_downmix */
+#define PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED 0x0000
+#define PS3AV_CMD_AUDIO_DOWNMIX_PROHIBITED 0x0001
+
+/* audio_port */
+#define PS3AV_CMD_AUDIO_PORT_HDMI_0 (1 << 0)
+#define PS3AV_CMD_AUDIO_PORT_HDMI_1 (1 << 1)
+#define PS3AV_CMD_AUDIO_PORT_AVMULTI_0 (1 << 10)
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_0 (1 << 20)
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_1 (1 << 21)
+
+/* audio_ctrl_id */
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_RESET 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_DE_EMPHASIS 0x0001
+#define PS3AV_CMD_AUDIO_CTRL_ID_AVCLK 0x0002
+/* audio_ctrl_data[0] reset */
+#define PS3AV_CMD_AUDIO_CTRL_RESET_NEGATE 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_RESET_ASSERT 0x0001
+/* audio_ctrl_data[0] de-emphasis */
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_OFF 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_ON 0x0001
+/* audio_ctrl_data[0] avclk */
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_22 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_18 0x0001
+
+/* av_vid */
+/* do not use these params directly, use vid_video2av */
+#define PS3AV_CMD_AV_VID_480I 0x0000
+#define PS3AV_CMD_AV_VID_480P 0x0001
+#define PS3AV_CMD_AV_VID_720P_60HZ 0x0002
+#define PS3AV_CMD_AV_VID_1080I_60HZ 0x0003
+#define PS3AV_CMD_AV_VID_1080P_60HZ 0x0004
+#define PS3AV_CMD_AV_VID_576I 0x0005
+#define PS3AV_CMD_AV_VID_576P 0x0006
+#define PS3AV_CMD_AV_VID_720P_50HZ 0x0007
+#define PS3AV_CMD_AV_VID_1080I_50HZ 0x0008
+#define PS3AV_CMD_AV_VID_1080P_50HZ 0x0009
+#define PS3AV_CMD_AV_VID_WXGA 0x000a
+#define PS3AV_CMD_AV_VID_SXGA 0x000b
+#define PS3AV_CMD_AV_VID_WUXGA 0x000c
+/* av_cs_out av_cs_in */
+/* use cs_video2av() */
+#define PS3AV_CMD_AV_CS_RGB_8 0x0000
+#define PS3AV_CMD_AV_CS_YUV444_8 0x0001
+#define PS3AV_CMD_AV_CS_YUV422_8 0x0002
+#define PS3AV_CMD_AV_CS_XVYCC_8 0x0003
+#define PS3AV_CMD_AV_CS_RGB_10 0x0004
+#define PS3AV_CMD_AV_CS_YUV444_10 0x0005
+#define PS3AV_CMD_AV_CS_YUV422_10 0x0006
+#define PS3AV_CMD_AV_CS_XVYCC_10 0x0007
+#define PS3AV_CMD_AV_CS_RGB_12 0x0008
+#define PS3AV_CMD_AV_CS_YUV444_12 0x0009
+#define PS3AV_CMD_AV_CS_YUV422_12 0x000a
+#define PS3AV_CMD_AV_CS_XVYCC_12 0x000b
+#define PS3AV_CMD_AV_CS_8 0x0000
+#define PS3AV_CMD_AV_CS_10 0x0001
+#define PS3AV_CMD_AV_CS_12 0x0002
+/* dither */
+#define PS3AV_CMD_AV_DITHER_OFF 0x0000
+#define PS3AV_CMD_AV_DITHER_ON 0x0001
+#define PS3AV_CMD_AV_DITHER_8BIT 0x0000
+#define PS3AV_CMD_AV_DITHER_10BIT 0x0002
+#define PS3AV_CMD_AV_DITHER_12BIT 0x0004
+/* super_white */
+#define PS3AV_CMD_AV_SUPER_WHITE_OFF 0x0000
+#define PS3AV_CMD_AV_SUPER_WHITE_ON 0x0001
+/* aspect */
+#define PS3AV_CMD_AV_ASPECT_16_9 0x0000
+#define PS3AV_CMD_AV_ASPECT_4_3 0x0001
+/* video_cs_cnv() */
+#define PS3AV_CMD_VIDEO_CS_RGB 0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV422 0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV444 0x0003
+
+/* for broadcast automode */
+#define PS3AV_RESBIT_720x480P 0x0003 /* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_720x576P 0x0003 /* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_1280x720P 0x0004
+#define PS3AV_RESBIT_1920x1080I 0x0008
+#define PS3AV_RESBIT_1920x1080P 0x4000
+#define PS3AV_RES_MASK_60 (PS3AV_RESBIT_720x480P \
+ | PS3AV_RESBIT_1280x720P \
+ | PS3AV_RESBIT_1920x1080I \
+ | PS3AV_RESBIT_1920x1080P)
+#define PS3AV_RES_MASK_50 (PS3AV_RESBIT_720x576P \
+ | PS3AV_RESBIT_1280x720P \
+ | PS3AV_RESBIT_1920x1080I \
+ | PS3AV_RESBIT_1920x1080P)
+
+/* for VESA automode */
+#define PS3AV_RESBIT_VGA 0x0001
+#define PS3AV_RESBIT_WXGA 0x0002
+#define PS3AV_RESBIT_SXGA 0x0004
+#define PS3AV_RESBIT_WUXGA 0x0008
+#define PS3AV_RES_MASK_VESA (PS3AV_RESBIT_WXGA |\
+ PS3AV_RESBIT_SXGA |\
+ PS3AV_RESBIT_WUXGA)
+
+#define PS3AV_MONITOR_TYPE_HDMI 1 /* HDMI */
+#define PS3AV_MONITOR_TYPE_DVI 2 /* DVI */
+
+
+/* for video mode */
+enum ps3av_mode_num {
+ PS3AV_MODE_AUTO = 0,
+ PS3AV_MODE_480I = 1,
+ PS3AV_MODE_480P = 2,
+ PS3AV_MODE_720P60 = 3,
+ PS3AV_MODE_1080I60 = 4,
+ PS3AV_MODE_1080P60 = 5,
+ PS3AV_MODE_576I = 6,
+ PS3AV_MODE_576P = 7,
+ PS3AV_MODE_720P50 = 8,
+ PS3AV_MODE_1080I50 = 9,
+ PS3AV_MODE_1080P50 = 10,
+ PS3AV_MODE_WXGA = 11,
+ PS3AV_MODE_SXGA = 12,
+ PS3AV_MODE_WUXGA = 13,
+};
+
+#define PS3AV_MODE_MASK 0x000F
+#define PS3AV_MODE_HDCP_OFF 0x1000 /* Retail PS3 products don't support this */
+#define PS3AV_MODE_DITHER 0x0800
+#define PS3AV_MODE_COLOR 0x0400
+#define PS3AV_MODE_WHITE 0x0200
+#define PS3AV_MODE_FULL 0x0080
+#define PS3AV_MODE_DVI 0x0040
+#define PS3AV_MODE_RGB 0x0020
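+
+/*
+ * Editor's example: a mode id carries an enum ps3av_mode_num value in the
+ * bits covered by PS3AV_MODE_MASK, with zero or more of the flag bits
+ * above OR'ed in.  A hedged sketch for full-range RGB 720p/60, using
+ * ps3av_set_video_mode() declared at the end of this header:
+ *
+ *	ps3av_set_video_mode(PS3AV_MODE_720P60 | PS3AV_MODE_RGB |
+ *			     PS3AV_MODE_FULL);
+ */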
+
+
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60 PS3AV_MODE_480P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60 PS3AV_MODE_480I
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50 PS3AV_MODE_576P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50 PS3AV_MODE_576I
+
+#define PS3AV_REGION_60 0x01
+#define PS3AV_REGION_50 0x02
+#define PS3AV_REGION_RGB 0x10
+
+#define get_status(buf) (((__u32 *)(buf))[2])
+#define PS3AV_HDR_SIZE 4 /* version + size */
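+
+/*
+ * Editor's sketch: replies start with the ps3av_reply_hdr layout below, so
+ * the status word is the third u32 of the buffer, which is what
+ * get_status() extracts.  A hedged reply check (buffer name assumed):
+ *
+ *	const struct ps3av_reply_hdr *recv = (const void *)recv_buf;
+ *	if (get_status(recv_buf) != PS3AV_STATUS_SUCCESS)
+ *		pr_err("cid %08x failed: %#x\n", recv->cid,
+ *		       get_status(recv_buf));
+ */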
+
+
+/** command packet structure **/
+struct ps3av_send_hdr {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+};
+
+struct ps3av_reply_hdr {
+ u16 version;
+ u16 size;
+ u32 cid;
+ u32 status;
+};
+
+/* backend: initialization */
+struct ps3av_pkt_av_init {
+ struct ps3av_send_hdr send_hdr;
+ u32 event_bit;
+};
+
+/* backend: finalize */
+struct ps3av_pkt_av_fin {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* backend: get port */
+struct ps3av_pkt_av_get_hw_conf {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 status;
+ u16 num_of_hdmi; /* out: number of hdmi */
+ u16 num_of_avmulti; /* out: number of avmulti */
+ u16 num_of_spdif; /* out: number of spdif */
+ u16 reserved;
+};
+
+/* backend: get monitor info */
+struct ps3av_info_resolution {
+ u32 res_bits;
+ u32 native;
+};
+
+struct ps3av_info_cs {
+ u8 rgb;
+ u8 yuv444;
+ u8 yuv422;
+ u8 reserved;
+};
+
+struct ps3av_info_color {
+ u16 red_x;
+ u16 red_y;
+ u16 green_x;
+ u16 green_y;
+ u16 blue_x;
+ u16 blue_y;
+ u16 white_x;
+ u16 white_y;
+ u32 gamma;
+};
+
+struct ps3av_info_audio {
+ u8 type;
+ u8 max_num_of_ch;
+ u8 fs;
+ u8 sbit;
+};
+
+struct ps3av_info_monitor {
+ u8 avport;
+ u8 monitor_id[10];
+ u8 monitor_type;
+ u8 monitor_name[16];
+ struct ps3av_info_resolution res_60;
+ struct ps3av_info_resolution res_50;
+ struct ps3av_info_resolution res_other;
+ struct ps3av_info_resolution res_vesa;
+ struct ps3av_info_cs cs;
+ struct ps3av_info_color color;
+ u8 supported_ai;
+ u8 speaker_info;
+ u8 num_of_audio_block;
+ struct ps3av_info_audio audio[0]; /* 0 or more audio blocks */
+ u8 reserved[169];
+} __attribute__ ((packed));
+
+struct ps3av_pkt_av_get_monitor_info {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+ /* recv */
+ struct ps3av_info_monitor info; /* out: monitor info */
+};
+
+/* backend: enable/disable event */
+struct ps3av_pkt_av_event {
+ struct ps3av_send_hdr send_hdr;
+ u32 event_bit; /* in */
+};
+
+/* backend: video cs param */
+struct ps3av_pkt_av_video_cs {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 av_vid; /* in: video resolution */
+ u16 av_cs_out; /* in: output color space */
+ u16 av_cs_in; /* in: input color space */
+ u8 dither; /* in: dither bit length */
+ u8 bitlen_out; /* in: bit length */
+ u8 super_white; /* in: super white */
+ u8 aspect; /* in: aspect ratio */
+};
+
+/* backend: video mute */
+struct ps3av_av_mute {
+ u16 avport; /* in: avport */
+ u16 mute; /* in: mute on/off */
+};
+
+struct ps3av_pkt_av_video_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: video disable signal */
+struct ps3av_pkt_av_video_disable_sig {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+};
+
+/* backend: audio param */
+struct ps3av_audio_info_frame {
+ struct pb1_bit {
+ u8 ct:4;
+ u8 rsv:1;
+ u8 cc:3;
+ } pb1;
+ struct pb2_bit {
+ u8 rsv:3;
+ u8 sf:3;
+ u8 ss:2;
+ } pb2;
+ u8 pb3;
+ u8 pb4;
+ struct pb5_bit {
+ u8 dm:1;
+ u8 lsv:4;
+ u8 rsv:3;
+ } pb5;
+};
+
+struct ps3av_pkt_av_audio_param {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+ u8 mclk; /* in: audio mclk */
+ u8 ns[3]; /* in: audio ns val */
+ u8 enable; /* in: audio enable */
+ u8 swaplr; /* in: audio swap */
+ u8 fifomap; /* in: audio fifomap */
+ u8 inputctrl; /* in: audio input ctrl */
+ u8 inputlen; /* in: sample bit size */
+ u8 layout; /* in: speaker layout param */
+ struct ps3av_audio_info_frame info; /* in: info */
+ u8 chstat[5]; /* in: ch stat */
+};
+
+/* backend: audio_mute */
+struct ps3av_pkt_av_audio_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: hdmi_mode */
+struct ps3av_pkt_av_hdmi_mode {
+ struct ps3av_send_hdr send_hdr;
+ u8 mode; /* in: hdmi_mode */
+ u8 reserved0;
+ u8 reserved1;
+ u8 reserved2;
+};
+
+/* backend: tv_mute */
+struct ps3av_pkt_av_tv_mute {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport HDMI only */
+ u16 mute; /* in: mute */
+};
+
+/* video: initialize */
+struct ps3av_pkt_video_init {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* video: mode setting */
+struct ps3av_pkt_video_mode {
+ struct ps3av_send_hdr send_hdr;
+ u32 video_head; /* in: head */
+ u32 reserved;
+ u32 video_vid; /* in: video resolution */
+ u16 reserved1;
+ u16 width; /* in: width in pixel */
+ u16 reserved2;
+ u16 height; /* in: height in pixel */
+ u32 pitch; /* in: line size in byte */
+ u32 video_out_format; /* in: out format */
+ u32 video_format; /* in: input frame buffer format */
+ u8 reserved3;
+ u8 video_cl_cnv; /* in: color conversion */
+ u16 video_order; /* in: input RGB order */
+ u32 reserved4;
+};
+
+/* video: format */
+struct ps3av_pkt_video_format {
+ struct ps3av_send_hdr send_hdr;
+ u32 video_head; /* in: head */
+ u32 video_format; /* in: frame buffer format */
+ u8 reserved;
+ u8 video_cl_cnv; /* in: color conversion */
+ u16 video_order; /* in: input RGB order */
+};
+
+/* video: pitch */
+struct ps3av_pkt_video_pitch {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u32 video_head; /* in: head */
+ u32 pitch; /* in: line size in byte */
+};
+
+/* audio: initialize */
+struct ps3av_pkt_audio_init {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* audio: mode setting */
+struct ps3av_pkt_audio_mode {
+ struct ps3av_send_hdr send_hdr;
+ u8 avport; /* in: avport */
+ u8 reserved0[3];
+ u32 mask; /* in: mask */
+ u32 audio_num_of_ch; /* in: number of ch */
+ u32 audio_fs; /* in: sampling freq */
+ u32 audio_word_bits; /* in: sample bit size */
+ u32 audio_format; /* in: audio output format */
+ u32 audio_source; /* in: audio source */
+ u8 audio_enable[4]; /* in: audio enable */
+ u8 audio_swap[4]; /* in: audio swap */
+ u8 audio_map[4]; /* in: audio map */
+ u32 audio_layout; /* in: speaker layout */
+ u32 audio_downmix; /* in: audio downmix permission */
+ u32 audio_downmix_level;
+ u8 audio_cs_info[8]; /* in: IEC channel status */
+};
+
+/* audio: mute */
+struct ps3av_audio_mute {
+ u8 avport; /* in: opt_port optical */
+ u8 reserved[3];
+ u32 mute; /* in: mute */
+};
+
+struct ps3av_pkt_audio_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_audio_mute mute[PS3AV_OPT_PORT_MAX];
+};
+
+/* audio: active/inactive */
+struct ps3av_pkt_audio_active {
+ struct ps3av_send_hdr send_hdr;
+ u32 audio_port; /* in: audio active/inactive port */
+};
+
+/* audio: SPDIF user bit */
+struct ps3av_pkt_audio_spdif_bit {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u8 avport; /* in: avport SPDIF only */
+ u8 reserved[3];
+ u32 audio_port; /* in: SPDIF only */
+ u32 spdif_bit_data[12]; /* in: user bit data */
+};
+
+/* audio: audio control */
+struct ps3av_pkt_audio_ctrl {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u32 audio_ctrl_id; /* in: control id */
+ u32 audio_ctrl_data[4]; /* in: control data */
+};
+
+/* avb:param */
+#define PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE \
+ (PS3AV_AVB_NUM_VIDEO*sizeof(struct ps3av_pkt_video_mode) + \
+ PS3AV_AVB_NUM_AUDIO*sizeof(struct ps3av_pkt_audio_mode) + \
+ PS3AV_AVB_NUM_AV_VIDEO*sizeof(struct ps3av_pkt_av_video_cs) + \
+ PS3AV_AVB_NUM_AV_AUDIO*sizeof(struct ps3av_pkt_av_audio_param))
+
+struct ps3av_pkt_avb_param {
+ struct ps3av_send_hdr send_hdr;
+ u16 num_of_video_pkt;
+ u16 num_of_audio_pkt;
+ u16 num_of_av_video_pkt;
+ u16 num_of_av_audio_pkt;
+ /*
+ * The actual buffer layout depends on the fields above:
+ *
+ * struct ps3av_pkt_video_mode video[num_of_video_pkt];
+ * struct ps3av_pkt_audio_mode audio[num_of_audio_pkt];
+ * struct ps3av_pkt_av_video_cs av_video[num_of_av_video_pkt];
+ * struct ps3av_pkt_av_audio_param av_audio[num_of_av_audio_pkt];
+ */
+ u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
+};
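+
+/*
+ * Editor's sketch of the packing implied by the layout comment above: the
+ * sub-packets are copied back to back into buf[] in video, audio,
+ * av_video, av_audio order, with the num_of_* counts telling the backend
+ * how many of each to expect.  A hedged example for one video mode packet:
+ *
+ *	struct ps3av_pkt_avb_param avb = { .num_of_video_pkt = 1 };
+ *	memcpy(avb.buf, &video_mode, sizeof(video_mode));
+ *	res = ps3av_cmd_avb_param(&avb, sizeof(avb));
+ */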
+
+/* channel status */
+extern u8 ps3av_mode_cs_info[];
+
+/** command status **/
+#define PS3AV_STATUS_SUCCESS 0x0000 /* success */
+#define PS3AV_STATUS_RECEIVE_VUART_ERROR 0x0001 /* receive vuart error */
+#define PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL 0x0002 /* syscon communication error */
+#define PS3AV_STATUS_INVALID_COMMAND 0x0003 /* obsolete invalid CID */
+#define PS3AV_STATUS_INVALID_PORT 0x0004 /* invalid port number */
+#define PS3AV_STATUS_INVALID_VID 0x0005 /* invalid video format */
+#define PS3AV_STATUS_INVALID_COLOR_SPACE 0x0006 /* invalid video color space */
+#define PS3AV_STATUS_INVALID_FS 0x0007 /* invalid audio sampling freq */
+#define PS3AV_STATUS_INVALID_AUDIO_CH 0x0008 /* invalid audio channel number */
+#define PS3AV_STATUS_UNSUPPORTED_VERSION 0x0009 /* version mismatch */
+#define PS3AV_STATUS_INVALID_SAMPLE_SIZE 0x000a /* invalid audio sample bit size */
+#define PS3AV_STATUS_FAILURE 0x000b /* other failures */
+#define PS3AV_STATUS_UNSUPPORTED_COMMAND 0x000c /* unsupported cid */
+#define PS3AV_STATUS_BUFFER_OVERFLOW 0x000d /* write buffer overflow */
+#define PS3AV_STATUS_INVALID_VIDEO_PARAM 0x000e /* invalid video param */
+#define PS3AV_STATUS_NO_SEL 0x000f /* selector does not exist */
+#define PS3AV_STATUS_INVALID_AV_PARAM 0x0010 /* invalid backend param */
+#define PS3AV_STATUS_INVALID_AUDIO_PARAM 0x0011 /* invalid audio param */
+#define PS3AV_STATUS_UNSUPPORTED_HDMI_MODE 0x0012 /* unsupported hdmi mode */
+#define PS3AV_STATUS_NO_SYNC_HEAD 0x0013 /* sync head failed */
+
+extern void ps3av_set_hdr(u32, u16, struct ps3av_send_hdr *);
+extern int ps3av_do_pkt(u32, u16, size_t, struct ps3av_send_hdr *);
+
+extern int ps3av_cmd_init(void);
+extern int ps3av_cmd_fin(void);
+extern int ps3av_cmd_av_video_mute(int, u32 *, u32);
+extern int ps3av_cmd_av_video_disable_sig(u32);
+extern int ps3av_cmd_av_tv_mute(u32, u32);
+extern int ps3av_cmd_enable_event(void);
+extern int ps3av_cmd_av_hdmi_mode(u8);
+extern u32 ps3av_cmd_set_av_video_cs(void *, u32, int, int, int, u32);
+extern u32 ps3av_cmd_set_video_mode(void *, u32, int, int, u32);
+extern int ps3av_cmd_video_format_black(u32, u32, u32);
+extern int ps3av_cmd_av_audio_mute(int, u32 *, u32);
+extern u32 ps3av_cmd_set_av_audio_param(void *, u32,
+ const struct ps3av_pkt_audio_mode *,
+ u32);
+extern void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *, u32, u32,
+ u32, u32, u32, u32);
+extern int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *);
+extern int ps3av_cmd_audio_mute(int, u32 *, u32);
+extern int ps3av_cmd_audio_active(int, u32);
+extern int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *, u32);
+extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *);
+extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
+ u32);
+
+extern int ps3av_set_video_mode(int);
+extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
+extern int ps3av_get_auto_mode(void);
+extern int ps3av_get_mode(void);
+extern int ps3av_video_mode2res(u32, u32 *, u32 *);
+extern int ps3av_video_mute(int);
+extern int ps3av_audio_mute(int);
+extern int ps3av_audio_mute_analog(int);
+#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h
new file mode 100644
index 000000000..9645c3047
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3gpu.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PS3 GPU declarations.
+ *
+ * Copyright 2009 Sony Corporation
+ */
+
+#ifndef _ASM_POWERPC_PS3GPU_H
+#define _ASM_POWERPC_PS3GPU_H
+
+#include <linux/mutex.h>
+
+#include <asm/lv1call.h>
+
+
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE 0x603
+
+#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32)
+
+#define L1GPU_DISPLAY_SYNC_HSYNC 1
+#define L1GPU_DISPLAY_SYNC_VSYNC 2
+
+
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
+
+static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar,
+ u64 xdr_size, u64 ioif_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
+ xdr_lpar, xdr_size, ioif_offset, 0);
+}
+
+static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset,
+ u64 ioif_offset, u64 sync_width, u64 pitch)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+ ddr_offset, ioif_offset, sync_width,
+ pitch);
+}
+
+static inline int lv1_gpu_fb_close(u64 context_handle)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0,
+ 0, 0, 0);
+}
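+
+/*
+ * Editor's usage sketch: callers are expected to hold ps3_gpu_mutex around
+ * GPU operations that may race with a video mode change.  Whether the
+ * completion flag is OR'ed into the sync_width argument is an assumption:
+ *
+ *	mutex_lock(&ps3_gpu_mutex);
+ *	status = lv1_gpu_fb_blit(ctx, ddr_offset, ioif_offset,
+ *				 L1GPU_FB_BLIT_WAIT_FOR_COMPLETION | width,
+ *				 pitch);
+ *	mutex_unlock(&ps3_gpu_mutex);
+ */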
+
+#endif /* _ASM_POWERPC_PS3GPU_H */
diff --git a/arch/powerpc/include/asm/ps3stor.h b/arch/powerpc/include/asm/ps3stor.h
new file mode 100644
index 000000000..1d8279014
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3stor.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PS3 Storage Devices
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ */
+
+#ifndef _ASM_POWERPC_PS3STOR_H_
+#define _ASM_POWERPC_PS3STOR_H_
+
+#include <linux/interrupt.h>
+
+#include <asm/ps3.h>
+
+
+struct ps3_storage_region {
+ unsigned int id;
+ u64 start;
+ u64 size;
+};
+
+struct ps3_storage_device {
+ struct ps3_system_bus_device sbd;
+
+ struct ps3_dma_region dma_region;
+ unsigned int irq;
+ u64 blk_size;
+
+ u64 tag;
+ u64 lv1_status;
+ struct completion done;
+
+ unsigned long bounce_size;
+ void *bounce_buf;
+ u64 bounce_lpar;
+ dma_addr_t bounce_dma;
+
+ unsigned int num_regions;
+ unsigned long accessible_regions;
+ unsigned int region_idx; /* first accessible region */
+ struct ps3_storage_region regions[]; /* Must be last */
+};
+
+static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
+{
+ return container_of(dev, struct ps3_storage_device, sbd.core);
+}
+
+extern int ps3stor_setup(struct ps3_storage_device *dev,
+ irq_handler_t handler);
+extern void ps3stor_teardown(struct ps3_storage_device *dev);
+extern u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+ u64 start_sector, u64 sectors,
+ int write);
+extern u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd,
+ u64 arg1, u64 arg2, u64 arg3, u64 arg4);
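+
+/*
+ * Editor's sketch of the expected driver flow (interrupt handler name
+ * hypothetical): probe sets up the DMA region, bounce buffer and irq via
+ * ps3stor_setup(), then I/O moves sectors through the bounce buffer.
+ *
+ *	res = ps3stor_setup(dev, my_storage_interrupt);
+ *	...
+ *	status = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+ *					    start_sector, sectors, 0);
+ *	...
+ *	ps3stor_teardown(dev);
+ */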
+
+#endif /* _ASM_POWERPC_PS3STOR_H_ */
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
new file mode 100644
index 000000000..714a35f0d
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
+ pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+ !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+ /*
+ * We should not find a huge page if these configs are not enabled.
+ */
+ if (hshift)
+ WARN_ON(*hshift);
+#endif
+ return pte;
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+ pgd_t *pgdir = init_mm.pgd;
+ return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
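+
+/*
+ * Editor's example: since the walk above takes no locks it can be called
+ * from real mode, e.g. to translate an ioremapped address back to a
+ * physical address:
+ *
+ *	phys_addr_t pa = ppc_find_vmap_phys((unsigned long)vaddr);
+ *
+ * A zero return (after the WARN) means no pte was found for the address.
+ */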
+
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
+ VM_WARN(pgdir != current->mm->pgd,
+ "%s lockless page table lookup called on wrong mm\n", __func__);
+ pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+ !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+ /*
+ * We should not find a huge page if these configs are not enabled.
+ */
+ if (hshift)
+ WARN_ON(*hshift);
+#endif
+ return pte;
+}
+
+#endif /* _ASM_POWERPC_PTE_WALK_H */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
new file mode 100644
index 000000000..2efec6d87
--- /dev/null
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * this should only contain volatile regs
+ * since we can keep non-volatile in the thread_struct
+ * should set this up when only volatiles are saved
+ * by intr code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to insure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
+ */
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
+
+#include <linux/err.h>
+#include <uapi/asm/ptrace.h>
+#include <asm/asm-const.h>
+#include <asm/reg.h>
+
+#ifndef __ASSEMBLY__
+struct pt_regs
+{
+ union {
+ struct user_pt_regs user_regs;
+ struct {
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3;
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef CONFIG_PPC64
+ unsigned long softe;
+#else
+ unsigned long mq;
+#endif
+ unsigned long trap;
+ union {
+ unsigned long dar;
+ unsigned long dear;
+ };
+ union {
+ unsigned long dsisr;
+ unsigned long esr;
+ };
+ unsigned long result;
+ };
+ };
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
+ union {
+ struct {
+#ifdef CONFIG_PPC64
+ unsigned long ppr;
+ unsigned long exit_result;
+#endif
+ union {
+#ifdef CONFIG_PPC_KUAP
+ unsigned long kuap;
+#endif
+#ifdef CONFIG_PPC_PKEY
+ unsigned long amr;
+#endif
+ };
+#ifdef CONFIG_PPC_PKEY
+ unsigned long iamr;
+#endif
+ };
+ unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
+ };
+#endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+ struct { /* Must be a multiple of 16 bytes */
+ unsigned long mas0;
+ unsigned long mas1;
+ unsigned long mas2;
+ unsigned long mas3;
+ unsigned long mas6;
+ unsigned long mas7;
+ unsigned long srr0;
+ unsigned long srr1;
+ unsigned long csrr0;
+ unsigned long csrr1;
+ unsigned long dsrr0;
+ unsigned long dsrr1;
+ };
+#endif
+};
+#endif
+
+
+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
+// Always displays as "REGS" in memory dumps
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753)
+#else
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x53474552)
+#endif
+
+#ifdef __powerpc64__
+
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE 512
+#define KERNEL_REDZONE_SIZE 288
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
+#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
+ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
+#define STACK_FRAME_MARKER 12
+
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+#define STACK_FRAME_MIN_SIZE 32
+#else
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
+#endif
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 128
+#define __SIGNAL_FRAMESIZE32 64
+
+#else /* __powerpc64__ */
+
+#define USER_REDZONE_SIZE 0
+#define KERNEL_REDZONE_SIZE 0
+#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
+#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
+#define STACK_FRAME_MARKER 2
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 64
+
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+ set_return_regs_changed();
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+ regs->msr = msr;
+ set_return_regs_changed();
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+ regs_set_return_ip(regs, regs->nip + offset);
+}
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->nip;
+}
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs_set_return_ip(regs, val);
+}
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->gpr[1];
+}
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return 0;
+}
+
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+
+#define force_successful_syscall_return() \
+ do { \
+ set_thread_flag(TIF_NOERROR); \
+ } while(0)
+
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
+
+/*
+ * The 4 low bits (0xf) are available as flags to overload the trap word,
+ * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
+ * must cover the bits used as flags, including bit 0 which is used as the
+ * "norestart" bit.
+ */
+#ifdef __powerpc64__
+#define TRAP_FLAGS_MASK 0x1
+#else
+/*
+ * On 4xx we use bit 1 in the trap word to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define TRAP_FLAGS_MASK 0xf
+#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
+#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
+#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
+#endif /* __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
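+
+/*
+ * Editor's example: compare through TRAP() rather than reading regs->trap
+ * directly, so the flag bits above cannot defeat the match; e.g. a Book3S
+ * data storage interrupt test would be (handler name hypothetical):
+ *
+ *	if (TRAP(regs) == 0x300)
+ *		handle_dsi(regs);
+ */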
+
+static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
+{
+ regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
+}
+
+static inline bool trap_is_scv(struct pt_regs *regs)
+{
+ return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
+}
+
+static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
+{
+ return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
+}
+
+static inline bool trap_is_syscall(struct pt_regs *regs)
+{
+ return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
+}
+
+static inline bool trap_norestart(struct pt_regs *regs)
+{
+ return regs->trap & 0x1;
+}
+
+static __always_inline void set_trap_norestart(struct pt_regs *regs)
+{
+ regs->trap |= 0x1;
+}
+
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
+}
+
+static inline bool cpu_has_msr_ri(void)
+{
+ return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
+}
+
+static inline bool regs_is_unrecoverable(struct pt_regs *regs)
+{
+ return unlikely(cpu_has_msr_ri() && !(regs->msr & MSR_RI));
+}
+
+static inline void regs_set_recoverable(struct pt_regs *regs)
+{
+ if (cpu_has_msr_ri())
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+}
+
+static inline void regs_set_unrecoverable(struct pt_regs *regs)
+{
+ if (cpu_has_msr_ri())
+ regs_set_return_msr(regs, regs->msr & ~MSR_RI);
+}
+
+#define arch_has_single_step() (1)
+#define arch_has_block_step() (true)
+#define ARCH_HAS_USER_SINGLE_STEP_REPORT
+
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of the register whose offset in
+ * struct pt_regs is @offset.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
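+
+/*
+ * Editor's example: kprobe-based event tracing uses this to fetch stack
+ * slots at a probed site, e.g. the second entry above the stack pointer:
+ *
+ *	unsigned long val = regs_get_kernel_stack_nth(regs, 2);
+ */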
+
+#endif /* __ASSEMBLY__ */
+
+#ifndef __powerpc64__
+/* We need PT_SOFTE defined at all times to avoid #ifdefs */
+#define PT_SOFTE PT_MQ
+#else /* __powerpc64__ */
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
+#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
+#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
+#define PT_VRSAVE_32 (PT_VR0 + 33*4)
+#define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
+#endif /* __powerpc64__ */
+#endif /* _ASM_POWERPC_PTRACE_H */
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
new file mode 100644
index 000000000..b676c4fb9
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ if (!is_shared_processor())
+ native_queued_spin_lock_slowpath(lock, val);
+ else
+ __pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ if (!is_shared_processor())
+ smp_store_release(&lock->locked, 0);
+ else
+ __pv_queued_spin_unlock(lock);
+}
+
+#else
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val = 0;
+
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ return;
+
+ queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ if (*ptr != val)
+ return;
+ yield_to_any();
+ /*
+ * We could pass in a CPU here if waiting in the queue and yield to
+ * the previous CPU in the queue.
+ */
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+ prod_cpu(cpu);
+}
+
+extern void __pv_init_lock_hash(void);
+
+static inline void pv_spinlocks_init(void)
+{
+ __pv_init_lock_hash();
+}
+
+#endif
+
+/*
+ * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
+ * which was found to have performance problems if implemented with
+ * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
+ * generic version instead.
+ */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_POWERPC_QSPINLOCK_H */
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000..6b60e7736
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+
+EXPORT_SYMBOL(__pv_queued_spin_unlock);
+
+#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
new file mode 100644
index 000000000..8fda87af2
--- /dev/null
+++ b/arch/powerpc/include/asm/reg.h
@@ -0,0 +1,1466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains the definition of registers common to all PowerPC variants.
+ * If a register definition has been changed in a different PowerPC
+ * variant, we enclose it in #ifndef XXX ... #endif, and give here the
+ * number used in the Programming Environments Manual For 32-Bit
+ * Implementations of the PowerPC Architecture (a.k.a. Green Book).
+ */
+
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
+#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <linux/const.h>
+#include <asm/cputable.h>
+#include <asm/asm-const.h>
+#include <asm/feature-fixups.h>
+
+/* Pickup Book E specific registers. */
+#ifdef CONFIG_BOOKE_OR_40x
+#include <asm/reg_booke.h>
+#endif
+
+#ifdef CONFIG_FSL_EMB_PERFMON
+#include <asm/reg_fsl_emb.h>
+#endif
+
+#include <asm/reg_8xx.h>
+
+#define MSR_SF_LG 63 /* Enable 64 bit mode */
+#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_LG 33 /* Trans Mem state (2 bits) */
+#define MSR_TM_LG 32 /* Trans Mem Available */
+#define MSR_VEC_LG 25 /* Enable AltiVec */
+#define MSR_VSX_LG 23 /* Enable VSX */
+#define MSR_S_LG 22 /* Secure state */
+#define MSR_POW_LG 18 /* Enable Power Management */
+#define MSR_WE_LG 18 /* Wait State Enable */
+#define MSR_TGPR_LG 17 /* TLB Update registers in use */
+#define MSR_CE_LG 17 /* Critical Interrupt Enable */
+#define MSR_ILE_LG 16 /* Interrupt Little Endian */
+#define MSR_EE_LG 15 /* External Interrupt Enable */
+#define MSR_PR_LG 14 /* Problem State / Privilege Level */
+#define MSR_FP_LG 13 /* Floating Point enable */
+#define MSR_ME_LG 12 /* Machine Check Enable */
+#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
+#define MSR_SE_LG 10 /* Single Step */
+#define MSR_BE_LG 9 /* Branch Trace */
+#define MSR_DE_LG 9 /* Debug Exception Enable */
+#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
+#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG 5 /* Instruction Relocate */
+#define MSR_DR_LG 4 /* Data Relocate */
+#define MSR_PE_LG 3 /* Protection Enable */
+#define MSR_PX_LG 2 /* Protection Exclusive Mode */
+#define MSR_PMM_LG 2 /* Performance monitor */
+#define MSR_RI_LG 1 /* Recoverable Exception */
+#define MSR_LE_LG 0 /* Little Endian */
+
+#ifdef __ASSEMBLY__
+#define __MASK(X) (1<<(X))
+#else
+#define __MASK(X) (1UL<<(X))
+#endif
+
+#ifdef CONFIG_PPC64
+#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
+#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
+#define MSR_S __MASK(MSR_S_LG) /* Secure state */
+#else
+/* so tests for these bits fail on 32-bit */
+#define MSR_SF 0
+#define MSR_HV 0
+#define MSR_S 0
+#endif
+
+/*
+ * To be used in shared book E/book S, this avoids needing to worry about
+ * book S/book E in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE 0
+#endif
+
+#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
+#define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */
+#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
+#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
+#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
+#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
+#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
+#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
+#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
+#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
+#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
+#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
+#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
+#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
+#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
+#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
+#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
+#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
+#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
+#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
+#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
+#ifndef MSR_PMM
+#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
+#endif
+#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
+#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+
+#define MSR_TM __MASK(MSR_TM_LG) /* Transactional Mem Available */
+#define MSR_TS_N 0 /* Non-transactional */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#else
+#define MSR_TM_ACTIVE(x) ((void)(x), 0)
+#endif
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+#define MSR_64BIT MSR_SF
+
+/* Server variant */
+#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_ __MSR
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV)
+#else
+#define MSR_ (__MSR | MSR_LE)
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE)
+#endif
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
+#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+/* Default MSR for kernel mode. */
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
+
+#ifndef MSR_64BIT
+#define MSR_64BIT 0
+#endif
+
+/* Condition Register related */
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+#define CR0_TBEGIN_FAILURE (0x2 << 28) /* 0b0010 */
+
+
+/* Power Management - Processor Stop Status and Control Register Fields */
+#define PSSCR_RL_MASK 0x0000000F /* Requested Level */
+#define PSSCR_MTL_MASK 0x000000F0 /* Maximum Transition Level */
+#define PSSCR_TR_MASK 0x00000300 /* Transition State */
+#define PSSCR_PSLL_MASK 0x000F0000 /* Power-Saving Level Limit */
+#define PSSCR_EC 0x00100000 /* Exit Criterion */
+#define PSSCR_ESL 0x00200000 /* Enable State Loss */
+#define PSSCR_SD 0x00400000 /* Status Disable */
+#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_PLS_SHIFT 60
+#define PSSCR_GUEST_VIS 0xf0000000000003ffUL /* Guest-visible PSSCR fields */
+#define PSSCR_FAKE_SUSPEND 0x00000400 /* Fake-suspend bit (P9 DD2.2) */
+#define PSSCR_FAKE_SUSPEND_LG 10 /* Fake-suspend bit position */
+
+/* Floating Point Status and Control Register (FPSCR) Fields */
+#define FPSCR_FX 0x80000000 /* FPU exception summary */
+#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
+#define FPSCR_VX 0x20000000 /* Invalid operation summary */
+#define FPSCR_OX 0x10000000 /* Overflow exception summary */
+#define FPSCR_UX 0x08000000 /* Underflow exception summary */
+#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
+#define FPSCR_XX 0x02000000 /* Inexact exception summary */
+#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
+#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
+#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
+#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
+#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
+#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
+#define FPSCR_FR 0x00040000 /* Fraction rounded */
+#define FPSCR_FI 0x00020000 /* Fraction inexact */
+#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
+#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
+#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
+#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
+#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
+#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
+#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
+#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
+#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
+#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
+#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
+#define FPSCR_RN 0x00000003 /* FPU rounding control */
+
+/* Bit definitions for SPEFSCR. */
+#define SPEFSCR_SOVH 0x80000000 /* Summary integer overflow high */
+#define SPEFSCR_OVH 0x40000000 /* Integer overflow high */
+#define SPEFSCR_FGH 0x20000000 /* Embedded FP guard bit high */
+#define SPEFSCR_FXH 0x10000000 /* Embedded FP sticky bit high */
+#define SPEFSCR_FINVH 0x08000000 /* Embedded FP invalid operation high */
+#define SPEFSCR_FDBZH 0x04000000 /* Embedded FP div by zero high */
+#define SPEFSCR_FUNFH 0x02000000 /* Embedded FP underflow high */
+#define SPEFSCR_FOVFH 0x01000000 /* Embedded FP overflow high */
+#define SPEFSCR_FINXS 0x00200000 /* Embedded FP inexact sticky */
+#define SPEFSCR_FINVS 0x00100000 /* Embedded FP invalid op. sticky */
+#define SPEFSCR_FDBZS 0x00080000 /* Embedded FP div by zero sticky */
+#define SPEFSCR_FUNFS 0x00040000 /* Embedded FP underflow sticky */
+#define SPEFSCR_FOVFS 0x00020000 /* Embedded FP overflow sticky */
+#define SPEFSCR_MODE 0x00010000 /* Embedded FP mode */
+#define SPEFSCR_SOV 0x00008000 /* Integer summary overflow */
+#define SPEFSCR_OV 0x00004000 /* Integer overflow */
+#define SPEFSCR_FG 0x00002000 /* Embedded FP guard bit */
+#define SPEFSCR_FX 0x00001000 /* Embedded FP sticky bit */
+#define SPEFSCR_FINV 0x00000800 /* Embedded FP invalid operation */
+#define SPEFSCR_FDBZ 0x00000400 /* Embedded FP div by zero */
+#define SPEFSCR_FUNF 0x00000200 /* Embedded FP underflow */
+#define SPEFSCR_FOVF 0x00000100 /* Embedded FP overflow */
+#define SPEFSCR_FINXE 0x00000040 /* Embedded FP inexact enable */
+#define SPEFSCR_FINVE 0x00000020 /* Embedded FP invalid op. enable */
+#define SPEFSCR_FDBZE 0x00000010 /* Embedded FP div by zero enable */
+#define SPEFSCR_FUNFE 0x00000008 /* Embedded FP underflow enable */
+#define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */
+#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
+
+/* Special Purpose Registers (SPRNs)*/
+
+#ifdef CONFIG_40x
+#define SPRN_PID 0x3B1 /* Process ID */
+#else
+#define SPRN_PID 0x030 /* Process ID */
+#ifdef CONFIG_BOOKE
+#define SPRN_PID0 SPRN_PID /* Process ID Register 0 */
+#endif
+#endif
+
+#define SPRN_CTR 0x009 /* Count Register */
+#define SPRN_DSCR 0x11
+#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_AMR 0x1d /* Authority Mask Register */
+#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
+#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
+#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
+#define SPRN_TEXASRU 0x83 /* Transaction EXception & Summary Upper 32 bits */
+
+#define TEXASR_FC_LG (63 - 7) /* Failure Code */
+#define TEXASR_AB_LG (63 - 31) /* Abort */
+#define TEXASR_SU_LG (63 - 32) /* Suspend */
+#define TEXASR_HV_LG (63 - 34) /* Hypervisor state */
+#define TEXASR_PR_LG (63 - 35) /* Privilege level */
+#define TEXASR_FS_LG (63 - 36) /* Failure summary */
+#define TEXASR_EX_LG (63 - 37) /* TFIAR exact bit */
+#define TEXASR_ROT_LG (63 - 38) /* ROT bit */
+
+#define TEXASR_ABORT __MASK(TEXASR_AB_LG) /* terminated by tabort or treclaim */
+#define TEXASR_SUSP __MASK(TEXASR_SU_LG) /* tx failed in suspended state */
+#define TEXASR_HV __MASK(TEXASR_HV_LG) /* MSR[HV] when failure occurred */
+#define TEXASR_PR __MASK(TEXASR_PR_LG) /* MSR[PR] when failure occurred */
+#define TEXASR_FS __MASK(TEXASR_FS_LG) /* TEXASR Failure Summary */
+#define TEXASR_EXACT __MASK(TEXASR_EX_LG) /* TFIAR value is exact */
+#define TEXASR_ROT __MASK(TEXASR_ROT_LG)
+#define TEXASR_FC (ASM_CONST(0xFF) << TEXASR_FC_LG)
+
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+
+#define SPRN_TIDR 144 /* Thread ID register */
+#define SPRN_CTRLF 0x088
+#define SPRN_CTRLT 0x098
+#define CTRL_CT 0xc0000000 /* current thread */
+#define CTRL_CT0 0x80000000 /* thread 0 */
+#define CTRL_CT1 0x40000000 /* thread 1 */
+#define CTRL_TE 0x00c00000 /* thread enable */
+#define CTRL_RUNLATCH 0x1
+#define SPRN_DAWR0 0xB4
+#define SPRN_DAWR1 0xB5
+#define SPRN_RPR 0xBA /* Relative Priority Register */
+#define SPRN_CIABR 0xBB
+#define CIABR_PRIV 0x3
+#define CIABR_PRIV_USER 1
+#define CIABR_PRIV_SUPER 2
+#define CIABR_PRIV_HYPER 3
+#define SPRN_DAWRX0 0xBC
+#define SPRN_DAWRX1 0xBD
+#define DAWRX_USER __MASK(0)
+#define DAWRX_KERNEL __MASK(1)
+#define DAWRX_HYP __MASK(2)
+#define DAWRX_WTI __MASK(3)
+#define DAWRX_WT __MASK(4)
+#define DAWRX_DR __MASK(5)
+#define DAWRX_DW __MASK(6)
+#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
+#define SPRN_DABR2 0x13D /* e300 */
+#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
+#define DABRX_USER __MASK(0)
+#define DABRX_KERNEL __MASK(1)
+#define DABRX_HYP __MASK(2)
+#define DABRX_BTI __MASK(3)
+#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
+#define SPRN_DAR 0x013 /* Data Address Register */
+#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+#define DSISR_BAD_DIRECT_ST 0x80000000 /* Obsolete: Direct store error */
+#define DSISR_NOHPTE 0x40000000 /* no translation found */
+#define DSISR_ATTR_CONFLICT 0x20000000 /* P9: Process vs. Partition attr */
+#define DSISR_NOEXEC_OR_G 0x10000000 /* Alias of SRR1 bit, see below */
+#define DSISR_PROTFAULT 0x08000000 /* protection fault */
+#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */
+#define DSISR_ISSTORE 0x02000000 /* access was a store */
+#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+#define DSISR_NOSEGMENT 0x00200000 /* STAB miss (unsupported) */
+#define DSISR_KEYFAULT 0x00200000 /* Storage Key fault */
+#define DSISR_BAD_EXT_CTRL 0x00100000 /* Obsolete: External ctrl error */
+#define DSISR_UNSUPP_MMU 0x00080000 /* P9: Unsupported MMU config */
+#define DSISR_SET_RC 0x00040000 /* P9: Failed setting of R/C bits */
+#define DSISR_PRTABLE_FAULT 0x00020000 /* P9: Fault on process table */
+#define DSISR_ICSWX_NO_CT 0x00004000 /* P7: icswx unavailable cp type */
+#define DSISR_BAD_COPYPASTE 0x00000008 /* P9: Copy/Paste on wrong memtype */
+#define DSISR_BAD_AMO 0x00000004 /* P9: Incorrect AMO opcode */
+#define DSISR_BAD_CI_LDST 0x00000002 /* P8: Bad HV CI load/store */
+
+/*
+ * DSISR_NOEXEC_OR_G doesn't actually exist. This bit is always
+ * 0 on DSIs. However, on ISIs, the corresponding bit in SRR1
+ * indicates an attempt at executing from a no-execute PTE
+ * or segment or from a guarded page.
+ *
+ * We add a definition here for completeness as we alias
+ * DSISR and SRR1 in do_page_fault.
+ */
+
+/*
+ * DSISR bits that are treated as a fault. Any bit set
+ * here will skip hash_page, and cause do_page_fault to
+ * trigger a SIGBUS or SIGSEGV:
+ */
+#define DSISR_BAD_FAULT_32S (DSISR_BAD_DIRECT_ST | \
+ DSISR_BADACCESS | \
+ DSISR_BAD_EXT_CTRL)
+#define DSISR_BAD_FAULT_64S (DSISR_BAD_FAULT_32S | \
+ DSISR_ATTR_CONFLICT | \
+ DSISR_UNSUPP_MMU | \
+ DSISR_PRTABLE_FAULT | \
+ DSISR_ICSWX_NO_CT | \
+ DSISR_BAD_COPYPASTE | \
+ DSISR_BAD_AMO | \
+ DSISR_BAD_CI_LDST)
+/*
+ * These bits are equivalent in SRR1 and DSISR for 0x400
+ * instruction access interrupts on Book3S
+ */
+#define DSISR_SRR1_MATCH_32S (DSISR_NOHPTE | \
+ DSISR_NOEXEC_OR_G | \
+ DSISR_PROTFAULT)
+#define DSISR_SRR1_MATCH_64S (DSISR_SRR1_MATCH_32S | \
+ DSISR_KEYFAULT | \
+ DSISR_UNSUPP_MMU | \
+ DSISR_SET_RC | \
+ DSISR_PRTABLE_FAULT)
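+/*
+ * Illustrative sketch only (not a definition used elsewhere in this
+ * file): a data storage interrupt handler can classify a fault by
+ * masking the saved DSISR value against the sets above, e.g.
+ *
+ *	if (dsisr & DSISR_BAD_FAULT_64S)
+ *		(treat as a fatal bad access and skip hash_page)
+ */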
+
+#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
+#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/O) */
+#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
+#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
+#define SPRN_SPURR 0x134 /* Scaled PURR */
+#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
+#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
+#define SPRN_HDSISR 0x132
+#define SPRN_HDAR 0x133
+#define SPRN_HDEC 0x136 /* Hypervisor Decrementer */
+#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
+#define SPRN_RMOR 0x138 /* Real mode offset register */
+#define SPRN_HRMOR 0x139 /* Hypervisor real mode offset register */
+#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_ASDR 0x330 /* Access segment descriptor register */
+#define SPRN_IC 0x350 /* Virtual Instruction Count */
+#define SPRN_VTB 0x351 /* Virtual Time Base */
+#define SPRN_LDBAR 0x352 /* LD Base Address Register */
+#define SPRN_PMICR 0x354 /* Power Management Idle Control Reg */
+#define SPRN_PMSR 0x355 /* Power Management Status Reg */
+#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
+#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
+#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
+#define SPRN_PMCR 0x374 /* Power Management Control Register */
+#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
+
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_PREFIX_LG 13 /* Enable Prefix Instructions */
+#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
+#define FSCR_MSGP_LG 10 /* Enable MSGP */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_BHRB_LG 4 /* Enable Branch History Rolling Buffer*/
+#define FSCR_PM_LG 3 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
+#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_PREFIX __MASK(FSCR_PREFIX_LG)
+#define FSCR_SCV __MASK(FSCR_SCV_LG)
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
+#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
+#define HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
+#define SPRN_TAR 0x32f /* Target Address Register */
+#define SPRN_LPCR 0x13E /* LPAR Control Register */
+#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
+#define LPCR_VPM1 ASM_CONST(0x4000000000000000)
+#define LPCR_ISL ASM_CONST(0x2000000000000000)
+#define LPCR_VC_SH 61
+#define LPCR_DPFD_SH 52
+#define LPCR_DPFD (ASM_CONST(7) << LPCR_DPFD_SH)
+#define LPCR_VRMASD_SH 47
+#define LPCR_VRMASD (ASM_CONST(0x1f) << LPCR_VRMASD_SH)
+#define LPCR_VRMA_L ASM_CONST(0x0008000000000000)
+#define LPCR_VRMA_LP0 ASM_CONST(0x0001000000000000)
+#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
+#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
+#define LPCR_RMLS_SH 26
+#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
+#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
+#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
+#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
+#define LPCR_AIL_3 ASM_CONST(0x0000000001800000) /* MMU on exception offset 0xc00...4xxx */
+#define LPCR_ONL ASM_CONST(0x0000000000040000) /* online - PURR/SPURR count */
+#define LPCR_LD ASM_CONST(0x0000000000020000) /* large decrementer */
+#define LPCR_PECE ASM_CONST(0x000000000001f000) /* powersave exit cause enable */
+#define LPCR_PECEDP ASM_CONST(0x0000000000010000) /* directed priv dbells cause exit */
+#define LPCR_PECEDH ASM_CONST(0x0000000000008000) /* directed hyp dbells cause exit */
+#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
+#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
+#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
+#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
+#define LPCR_MER_SH 11
+#define LPCR_GTSE ASM_CONST(0x0000000000000400) /* Guest Translation Shootdown Enable */
+#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
+#define LPCR_HEIC ASM_CONST(0x0000000000000010) /* Hypervisor External Interrupt Control */
+#define LPCR_LPES 0x0000000c
+#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
+#define LPCR_LPES1 ASM_CONST(0x0000000000000004) /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
+#define LPCR_RMI ASM_CONST(0x0000000000000002) /* real mode is cache inhibit */
+#define LPCR_HVICE ASM_CONST(0x0000000000000002) /* P9: HV interrupt enable */
+#define LPCR_HDICE ASM_CONST(0x0000000000000001) /* Hyp Decr enable (HV,PR,EE) */
+#define LPCR_UPRT ASM_CONST(0x0000000000400000) /* Use Process Table (ISA 3) */
+#define LPCR_HR ASM_CONST(0x0000000000100000)
+#ifndef SPRN_LPID
+#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#endif
+#define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */
+#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
+#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
+#define SPRN_PCR 0x152 /* Processor compatibility register */
+#define PCR_VEC_DIS (__MASK(63-0)) /* Vec. disable (bit NA since POWER8) */
+#define PCR_VSX_DIS (__MASK(63-1)) /* VSX disable (bit NA since POWER8) */
+#define PCR_TM_DIS (__MASK(63-2)) /* Trans. memory disable (POWER8) */
+#define PCR_MMA_DIS (__MASK(63-3)) /* Matrix-Multiply Accelerator */
+#define PCR_HIGH_BITS (PCR_MMA_DIS | PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify and
+ * determine both the compatibility level which we want to emulate and the
+ * compatibility level which the host is capable of emulating.
+ */
+#define PCR_ARCH_300 0x10 /* Architecture 3.00 */
+#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
+#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
+#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
+#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205 | PCR_ARCH_300)
+#define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
+#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
+#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
+#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
+#define SPRN_TLBRPNR 0x156 /* P7 TLB control register */
+#define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */
+#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
+#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
+#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
+#define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */
+#define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */
+#define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */
+#define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */
+#define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */
+#define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */
+#define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */
+#define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */
+#define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */
+#define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */
+#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
+#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
+#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+#define SPRN_PPR 0x380 /* SMT Thread status Register */
+#define SPRN_TSCR 0x399 /* Thread Switch Control Register */
+
+#define SPRN_DEC 0x016 /* Decrement Register */
+#define SPRN_PIT 0x3DB /* Programmable Interval Timer (40x/BOOKE) */
+
+#define SPRN_DER 0x095 /* Debug Enable Register */
+#define DER_RSTE 0x40000000 /* Reset Interrupt */
+#define DER_CHSTPE 0x20000000 /* Check Stop */
+#define DER_MCIE 0x10000000 /* Machine Check Interrupt */
+#define DER_EXTIE 0x02000000 /* External Interrupt */
+#define DER_ALIE 0x01000000 /* Alignment Interrupt */
+#define DER_PRIE 0x00800000 /* Program Interrupt */
+#define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */
+#define DER_DECIE 0x00200000 /* Decrementer Interrupt */
+#define DER_SYSIE 0x00040000 /* System Call Interrupt */
+#define DER_TRE 0x00020000 /* Trace Interrupt */
+#define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */
+#define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */
+#define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */
+#define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */
+#define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */
+#define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */
+#define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */
+#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
+#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
+#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
+#define SPRN_EAR 0x11A /* External Address Register */
+#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
+#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
+#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
+#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
+#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
+#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
+#define HID0_SBCLK (1<<27)
+#define HID0_EICE (1<<26)
+#define HID0_TBEN (1<<26) /* Timebase enable - 745x */
+#define HID0_ECLK (1<<25)
+#define HID0_PAR (1<<24)
+#define HID0_STEN (1<<24) /* Software table search enable - 745x */
+#define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */
+#define HID0_DOZE (1<<23)
+#define HID0_NAP (1<<22)
+#define HID0_SLEEP (1<<21)
+#define HID0_DPM (1<<20)
+#define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */
+#define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */
+#define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/
+#define HID0_ICE (1<<15) /* Instruction Cache Enable */
+#define HID0_DCE (1<<14) /* Data Cache Enable */
+#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
+#define HID0_DLOCK (1<<12) /* Data Cache Lock */
+#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
+#define HID0_DCI (1<<10) /* Data Cache Invalidate */
+#define HID0_SPD (1<<9) /* Speculative disable */
+#define HID0_DAPUEN (1<<8) /* Debug APU enable */
+#define HID0_SGE (1<<7) /* Store Gathering Enable */
+#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
+#define HID0_DCFA (1<<6) /* Data Cache Flush Assist */
+#define HID0_LRSTK (1<<4) /* Link register stack - 745x */
+#define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */
+#define HID0_ABE (1<<3) /* Address Broadcast Enable */
+#define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */
+#define HID0_BHTE (1<<2) /* Branch History Table Enable */
+#define HID0_BTCD (1<<1) /* Branch target cache disable */
+#define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */
+#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
+/* POWER8 HID0 bits */
+#define HID0_POWER8_4LPARMODE __MASK(61)
+#define HID0_POWER8_2LPARMODE __MASK(57)
+#define HID0_POWER8_1TO2LPAR __MASK(52)
+#define HID0_POWER8_1TO4LPAR __MASK(51)
+#define HID0_POWER8_DYNLPARDIS __MASK(48)
+
+/* POWER9 HID0 bits */
+#define HID0_POWER9_RADIX __MASK(63 - 8)
+
+#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
+#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
+#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
+#define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */
+#define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */
+#define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */
+#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
+#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
+#define HID1_PS (1<<16) /* 750FX PLL selection */
+#endif
+#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
+#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
+#define SPRN_IABR2 0x3FA /* 83xx */
+#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
+#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
+#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
+#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
+#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
+#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
+#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
+#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
+#define SPRN_HID5 0x3F6 /* 970 HID5 */
+#define SPRN_HID6 0x3F9 /* BE HID 6 */
+#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
+#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
+#define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */
+#define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */
+#define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */
+#define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 0x3FD /* Thread switch control on others */
+#define SPRN_TST 0x3FC /* Thread switch timeout on others */
+#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
+#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
+#endif
+#define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */
+#define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */
+#define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */
+#define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */
+#define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */
+#define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */
+#define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */
+#define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */
+#define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */
+#define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */
+#define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */
+#define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */
+#define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */
+#define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */
+#define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */
+#define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */
+#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
+#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
+#ifndef SPRN_ICTRL
+#define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */
+#endif
+#define ICTRL_EICE 0x08000000 /* enable icache parity errs */
+#define ICTRL_EDC 0x04000000 /* enable dcache parity errs */
+#define ICTRL_EICP 0x00000100 /* enable icache par. check */
+#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
+#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
+#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
+#define SPRN_L2CR2 0x3f8
+#define L2CR_L2E 0x80000000 /* L2 enable */
+#define L2CR_L2PE 0x40000000 /* L2 parity enable */
+#define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */
+#define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */
+#define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */
+#define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */
+#define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */
+#define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */
+#define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */
+#define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */
+#define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */
+#define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */
+#define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */
+#define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */
+#define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */
+#define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */
+#define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */
+#define L2CR_L2DO 0x00400000 /* L2 data only */
+#define L2CR_L2I 0x00200000 /* L2 global invalidate */
+#define L2CR_L2CTL 0x00100000 /* L2 RAM control */
+#define L2CR_L2WT 0x00080000 /* L2 write-through */
+#define L2CR_L2TS 0x00040000 /* L2 test support */
+#define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */
+#define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */
+#define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */
+#define L2CR_L2SL 0x00008000 /* L2 DLL slow */
+#define L2CR_L2DF 0x00004000 /* L2 differential clock */
+#define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */
+#define L2CR_L2IP 0x00000001 /* L2 GI in progress */
+#define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */
+#define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */
+#define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */
+#define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */
+#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */
+#define L3CR_L3E 0x80000000 /* L3 enable */
+#define L3CR_L3PE 0x40000000 /* L3 data parity enable */
+#define L3CR_L3APE 0x20000000 /* L3 addr parity enable */
+#define L3CR_L3SIZ 0x10000000 /* L3 size */
+#define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */
+#define L3CR_L3RES 0x04000000 /* L3 special reserved bit */
+#define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */
+#define L3CR_L3IO 0x00400000 /* L3 instruction only */
+#define L3CR_L3SPO 0x00040000 /* L3 sample point override */
+#define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */
+#define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */
+#define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */
+#define L3CR_L3HWF 0x00000800 /* L3 hardware flush */
+#define L3CR_L3I 0x00000400 /* L3 global invalidate */
+#define L3CR_L3RT 0x00000300 /* L3 SRAM type */
+#define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */
+#define L3CR_L3DO 0x00000040 /* L3 data only mode */
+#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
+#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
+
+#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
+#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */
+#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
+#define SPRN_LDSTDB 0x3f4 /* */
+#define SPRN_LR 0x008 /* Link Register */
+#ifndef SPRN_PIR
+#define SPRN_PIR 0x3FF /* Processor Identification Register */
+#endif
+#define SPRN_TIR 0x1BE /* Thread Identification Register */
+#define SPRN_PTCR 0x1D0 /* Partition table control Register */
+#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
+#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
+#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
+#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
+#define SPRN_PVR 0x11F /* Processor Version Register */
+#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
+#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
+#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
+#define SPRN_ASR 0x118 /* Address Space Register */
+#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
+#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
+#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
+#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
+#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_USPRG3 0x103 /* SPRG3 userspace read */
+#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
+#define SPRN_USPRG4 0x104 /* SPRG4 userspace read */
+#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
+#define SPRN_USPRG5 0x105 /* SPRG5 userspace read */
+#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
+#define SPRN_USPRG6 0x106 /* SPRG6 userspace read */
+#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
+#define SPRN_USPRG7 0x107 /* SPRG7 userspace read */
+#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
+#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * Bits loaded from MSR upon interrupt.
+ * PPC (64-bit) bits 33-36,42-47 are interrupt dependent, the others are
+ * loaded from MSR. The exception is that SRESET and MCE do not always load
+ * bit 62 (RI) from MSR. Don't use PPC_BITMASK for this because 32-bit uses
+ * it.
+ */
+#define SRR1_MSR_BITS (~0x783f0000UL)
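+/*
+ * Illustrative use: code that wants only the MSR-derived bits of a saved
+ * SRR1 image can mask with this, e.g. "msr = regs->msr & SRR1_MSR_BITS".
+ */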
+#endif
+
+#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
+#define SRR1_ISI_N_G_OR_CIP 0x10000000 /* ISI: Access is no-exec or G or CI for a prefixed instruction */
+#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
+#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
+#define SRR1_WAKEMCE_RESVD 0x003c0000 /* Unused/reserved value used by MCE wakeup to indicate cause to idle wakeup handler */
+#define SRR1_WAKESYSERR 0x00300000 /* System error */
+#define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
+#define SRR1_WAKEMT 0x00280000 /* mtctrl */
+#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
+#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
+#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
+#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
+#define SRR1_WS_HVLOSS 0x00030000 /* HV resources not maintained */
+#define SRR1_WS_GPRLOSS 0x00020000 /* GPRs not maintained */
+#define SRR1_WS_NOLOSS 0x00010000 /* All resources maintained */
+#define SRR1_PROGTM 0x00200000 /* TM Bad Thing */
+#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGILL 0x00080000 /* Illegal instruction */
+#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
+#define SRR1_PROGTRAP 0x00020000 /* Trap */
+#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
+
+#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+#define SRR1_BOUNDARY 0x10000000 /* Prefixed instruction crosses 64-byte boundary */
+#define SRR1_PREFIXED 0x20000000 /* Exception caused by prefixed instruction */
+
+#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
+#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
+#define HSRR1_DENORM 0x00100000 /* Denorm exception */
+#define HSRR1_HISI_WRITE 0x00010000 /* HISI because memory couldn't be updated */
+
+#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
+#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
+#define TBCTL_RESTART 0x0000000100000000ull /* Restart all tbs */
+#define TBCTL_UPDATE_UPPER 0x0000000200000000ull /* Set upper 32 bits */
+#define TBCTL_UPDATE_LOWER 0x0000000300000000ull /* Set lower 32 bits */
+
+#ifndef SPRN_SVR
+#define SPRN_SVR 0x11E /* System Version Register */
+#endif
+#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
+/* these bits were defined in inverted endian sense originally, ugh, confusing */
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) ((x&0x7f)<<23)
+#define THRM3_SITV(x) ((x & 0x1fff) << 1)
+#define THRM1_TID (1<<2)
+#define THRM1_TIE (1<<1)
+#define THRM1_V (1<<0)
+#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
+#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
+#define THRM3_E (1<<0)
+#define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */
+#define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */
+#define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */
+#define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */
+#define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */
+#define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */
+#define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */
+#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
+#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
+#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+
+#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
+#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
+#define SPRN_PMC1_GEKKO 0x3B9 /* Gekko Performance Monitor Control 1 */
+#define SPRN_PMC2_GEKKO 0x3BA /* Gekko Performance Monitor Control 2 */
+#define SPRN_PMC3_GEKKO 0x3BD /* Gekko Performance Monitor Control 3 */
+#define SPRN_PMC4_GEKKO 0x3BE /* Gekko Performance Monitor Control 4 */
+#define SPRN_WPAR_GEKKO 0x399 /* Gekko Write Pipe Address Register */
+
+#define SPRN_SCOMC 0x114 /* SCOM Access Control */
+#define SPRN_SCOMD 0x115 /* SCOM Access DATA */
+
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0 795
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE ASM_CONST(0x04000000) /* perf mon exception enable */
+#define MMCR0_FCECE ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
+#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
+#define MMCR0_PMCC 0x000c0000UL /* PMC control */
+#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */
+#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */
+#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable */
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define MMCR0_C56RUN ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO ASM_CONST(0x00000080)
+#define MMCR0_SHRFC 0x00000040UL /* Share freeze conditions between threads */
+#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
+#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
+#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
+#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+#define SPRN_MMCR1 798
+#define SPRN_MMCR2 785
+#define SPRN_MMCR3 754
+#define SPRN_UMMCR2 769
+#define SPRN_UMMCR3 738
+#define SPRN_MMCRA 0x312
+#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
+#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+#define MMCRA_SDAR_ERAT_MISS 0x20000000UL
+#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
+#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
+#define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */
+#define MMCRA_SLOT_SHIFT 24
+#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define MMCRA_BHRB_DISABLE _UL(0x2000000000) // BHRB disable bit for ISA v3.1
+#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */
+#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
+#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
+#define POWER6_MMCRA_THRM 0x00000020UL
+#define POWER6_MMCRA_OTHER 0x0000000EUL
+
+#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
+#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
+
+#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */
+#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */
+#define SPRN_MMCRC 851 /* Core monitor mode control register */
+#define SPRN_EBBHR 804 /* Event based branch handler register */
+#define SPRN_EBBRR 805 /* Event based branch return register */
+#define SPRN_BESCR 806 /* Branch event status and control register */
+#define BESCR_GE 0x8000000000000000ULL /* Global Enable */
+#define SPRN_WORT 895 /* Workload optimization register - thread */
+#define SPRN_WORC 863 /* Workload optimization register - core */
+
+#define SPRN_PMC1 787
+#define SPRN_PMC2 788
+#define SPRN_PMC3 789
+#define SPRN_PMC4 790
+#define SPRN_PMC5 791
+#define SPRN_PMC6 792
+#define SPRN_PMC7 793
+#define SPRN_PMC8 794
+#define SPRN_SIER 784
+#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIER2 752
+#define SPRN_SIER3 753
+#define SPRN_USIER2 736
+#define SPRN_USIER3 737
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797
+#define SPRN_TACR 888
+#define SPRN_TCSCR 889
+#define SPRN_CSIGR 890
+#define SPRN_SPMC1 892
+#define SPRN_SPMC2 893
+
+/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+#define MMCR2_USER_MASK 0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
+#define SIER_USER_MASK 0x7fffffUL
+
+#define SPRN_PA6T_MMCR0 795
+#define PA6T_MMCR0_EN0 0x0000000000000001UL
+#define PA6T_MMCR0_EN1 0x0000000000000002UL
+#define PA6T_MMCR0_EN2 0x0000000000000004UL
+#define PA6T_MMCR0_EN3 0x0000000000000008UL
+#define PA6T_MMCR0_EN4 0x0000000000000010UL
+#define PA6T_MMCR0_EN5 0x0000000000000020UL
+#define PA6T_MMCR0_SUPEN 0x0000000000000040UL
+#define PA6T_MMCR0_PREN 0x0000000000000080UL
+#define PA6T_MMCR0_HYPEN 0x0000000000000100UL
+#define PA6T_MMCR0_FCM0 0x0000000000000200UL
+#define PA6T_MMCR0_FCM1 0x0000000000000400UL
+#define PA6T_MMCR0_INTGEN 0x0000000000000800UL
+#define PA6T_MMCR0_INTEN0 0x0000000000001000UL
+#define PA6T_MMCR0_INTEN1 0x0000000000002000UL
+#define PA6T_MMCR0_INTEN2 0x0000000000004000UL
+#define PA6T_MMCR0_INTEN3 0x0000000000008000UL
+#define PA6T_MMCR0_INTEN4 0x0000000000010000UL
+#define PA6T_MMCR0_INTEN5 0x0000000000020000UL
+#define PA6T_MMCR0_DISCNT 0x0000000000040000UL
+#define PA6T_MMCR0_UOP 0x0000000000080000UL
+#define PA6T_MMCR0_TRG 0x0000000000100000UL
+#define PA6T_MMCR0_TRGEN 0x0000000000200000UL
+#define PA6T_MMCR0_TRGREG 0x0000000001600000UL
+#define PA6T_MMCR0_SIARLOG 0x0000000002000000UL
+#define PA6T_MMCR0_SDARLOG 0x0000000004000000UL
+#define PA6T_MMCR0_PROEN 0x0000000008000000UL
+#define PA6T_MMCR0_PROLOG 0x0000000010000000UL
+#define PA6T_MMCR0_DAMEN2 0x0000000020000000UL
+#define PA6T_MMCR0_DAMEN3 0x0000000040000000UL
+#define PA6T_MMCR0_DAMEN4 0x0000000080000000UL
+#define PA6T_MMCR0_DAMEN5 0x0000000100000000UL
+#define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL
+#define PA6T_MMCR0_DAMSEL3 0x0000000400000000UL
+#define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL
+#define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL
+#define PA6T_MMCR0_HANDDIS 0x0000002000000000UL
+#define PA6T_MMCR0_PCTEN 0x0000004000000000UL
+#define PA6T_MMCR0_SOCEN 0x0000008000000000UL
+#define PA6T_MMCR0_SOCMOD 0x0000010000000000UL
+
+#define SPRN_PA6T_MMCR1 798
+#define PA6T_MMCR1_ES2 0x00000000000000ffUL
+#define PA6T_MMCR1_ES3 0x000000000000ff00UL
+#define PA6T_MMCR1_ES4 0x0000000000ff0000UL
+#define PA6T_MMCR1_ES5 0x00000000ff000000UL
+
+#define SPRN_PA6T_UPMC0 771 /* User PerfMon Counter 0 */
+#define SPRN_PA6T_UPMC1 772 /* ... */
+#define SPRN_PA6T_UPMC2 773
+#define SPRN_PA6T_UPMC3 774
+#define SPRN_PA6T_UPMC4 775
+#define SPRN_PA6T_UPMC5 776
+#define SPRN_PA6T_UMMCR0 779 /* User Monitor Mode Control Register 0 */
+#define SPRN_PA6T_SIAR 780 /* Sampled Instruction Address */
+#define SPRN_PA6T_UMMCR1 782 /* User Monitor Mode Control Register 1 */
+#define SPRN_PA6T_SIER 785 /* Sampled Instruction Event Register */
+#define SPRN_PA6T_PMC0 787
+#define SPRN_PA6T_PMC1 788
+#define SPRN_PA6T_PMC2 789
+#define SPRN_PA6T_PMC3 790
+#define SPRN_PA6T_PMC4 791
+#define SPRN_PA6T_PMC5 792
+#define SPRN_PA6T_TSR0 793 /* Timestamp Register 0 */
+#define SPRN_PA6T_TSR1 794 /* Timestamp Register 1 */
+#define SPRN_PA6T_TSR2 799 /* Timestamp Register 2 */
+#define SPRN_PA6T_TSR3 784 /* Timestamp Register 3 */
+
+#define SPRN_PA6T_IER 981 /* Icache Error Register */
+#define SPRN_PA6T_DER 982 /* Dcache Error Register */
+#define SPRN_PA6T_BER 862 /* BIU Error Address Register */
+#define SPRN_PA6T_MER 849 /* MMU Error Register */
+
+#define SPRN_PA6T_IMA0 880 /* Instruction Match Array 0 */
+#define SPRN_PA6T_IMA1 881 /* ... */
+#define SPRN_PA6T_IMA2 882
+#define SPRN_PA6T_IMA3 883
+#define SPRN_PA6T_IMA4 884
+#define SPRN_PA6T_IMA5 885
+#define SPRN_PA6T_IMA6 886
+#define SPRN_PA6T_IMA7 887
+#define SPRN_PA6T_IMA8 888
+#define SPRN_PA6T_IMA9 889
+#define SPRN_PA6T_BTCR 978 /* Breakpoint and Tagging Control Register */
+#define SPRN_PA6T_IMAAT 979 /* Instruction Match Array Action Table */
+#define SPRN_PA6T_PCCR 1019 /* Power Counter Control Register */
+#define SPRN_BKMK 1020 /* Cell Bookmark Register */
+#define SPRN_PA6T_RPCCR 1021 /* Retire PC Trace Control Register */
+
+
+#else /* 32-bit */
+#define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */
+#define MMCR0_PMCnCE 0x00004000UL /* count enable for all but PMC 1 */
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMC1SEL 0x00001fc0UL /* PMC 1 Event */
+#define MMCR0_PMC2SEL 0x0000003fUL /* PMC 2 Event */
+
+#define SPRN_MMCR1 956
+#define MMCR1_PMC3SEL 0xf8000000UL /* PMC 3 Event */
+#define MMCR1_PMC4SEL 0x07c00000UL /* PMC 4 Event */
+#define MMCR1_PMC5SEL 0x003e0000UL /* PMC 5 Event */
+#define MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */
+#define SPRN_MMCR2 944
+#define SPRN_PMC1 953 /* Performance Counter Register 1 */
+#define SPRN_PMC2 954 /* Performance Counter Register 2 */
+#define SPRN_PMC3 957 /* Performance Counter Register 3 */
+#define SPRN_PMC4 958 /* Performance Counter Register 4 */
+#define SPRN_PMC5 945 /* Performance Counter Register 5 */
+#define SPRN_PMC6 946 /* Performance Counter Register 6 */
+
+#define SPRN_SIAR 955 /* Sampled Instruction Address Register */
+
+/* Bit definitions for MMCR0 and PMC1 / PMC2. */
+#define MMCR0_PMC1_CYCLES (1 << 7)
+#define MMCR0_PMC1_ICACHEMISS (5 << 7)
+#define MMCR0_PMC1_DTLB (6 << 7)
+#define MMCR0_PMC2_DCACHEMISS 0x6
+#define MMCR0_PMC2_CYCLES 0x1
+#define MMCR0_PMC2_ITLB 0x7
+#define MMCR0_PMC2_LOADMISSTIME 0x5
+#endif
+
+/*
+ * SPRG usage:
+ *
+ * All 64-bit:
+ * - SPRG1 stores PACA pointer except 64-bit server in
+ * HV mode in which case it is HSPRG0
+ *
+ * 64-bit server:
+ * - SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4)
+ * - SPRG2 scratch for exception vectors
+ * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
+ * - HSPRG0 stores PACA in HV mode
+ * - HSPRG1 scratch for "HV" exceptions
+ *
+ * 64-bit embedded
+ * - SPRG0 generic exception scratch
+ * - SPRG2 TLB exception stack
+ * - SPRG3 critical exception scratch (user visible, sorry!)
+ * - SPRG4 unused (user visible)
+ * - SPRG6 TLB miss scratch (user visible, sorry !)
+ * - SPRG7 CPU and NUMA node for VDSO getcpu (user visible)
+ * - SPRG8 machine check exception scratch
+ * - SPRG9 debug exception scratch
+ *
+ * All 32-bit:
+ * - SPRG3 current thread_struct physical addr pointer
+ * (virtual on BookE, physical on others)
+ *
+ * 32-bit classic:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 indicator that we are in RTAS
+ * - SPRG4 (603 only) pseudo TLB LRU data
+ *
+ * 32-bit 40x:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 scratch for exception vectors
+ * - SPRG4 scratch for exception vectors (not 403)
+ * - SPRG5 scratch for exception vectors (not 403)
+ * - SPRG6 scratch for exception vectors (not 403)
+ * - SPRG7 scratch for exception vectors (not 403)
+ *
+ * 32-bit 440 and FSL BookE:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors (*)
+ * - SPRG2 scratch for crit interrupts handler
+ * - SPRG4 scratch for exception vectors
+ * - SPRG5 scratch for exception vectors
+ * - SPRG6 scratch for machine check handler
+ * - SPRG7 scratch for exception vectors
+ * - SPRG9 scratch for debug vectors (e500 only)
+ *
+ * Additionally, BookE separates "read" and "write"
+ * of those registers. That allows the use of the
+ * userspace-readable variant for reads, which can avoid
+ * a fault with KVM-type virtualization.
+ *
+ * 32-bit 8xx:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 scratch for exception vectors
+ *
+ */
+#ifdef CONFIG_PPC64
+#define SPRN_SPRG_PACA SPRN_SPRG1
+#else
+#define SPRN_SPRG_THREAD SPRN_SPRG3
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
+#define SPRN_SPRG_HPACA SPRN_HSPRG0
+#define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1
+#define SPRN_SPRG_VDSO_READ SPRN_USPRG3
+#define SPRN_SPRG_VDSO_WRITE SPRN_SPRG3
+
+#define GET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_PACA; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HPACA; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_PACA,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HPACA,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define GET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_SCRATCH0; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HSCRATCH0; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_SCRATCH0,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HSCRATCH0,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
+#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX
+
+#endif
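+/*
+ * Illustrative use (a sketch only; the real entry code lives in the
+ * exception vectors, e.g. exceptions-64s.S):
+ *
+ *	SET_SCRATCH0(r13)	save r13 so it can be clobbered
+ *	GET_PACA(r13)		r13 now points to the PACA
+ *	...
+ *	GET_SCRATCH0(r13)	restore the original r13 on exit
+ */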
+
+#ifdef CONFIG_PPC_BOOK3E_64
+#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
+#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3
+#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
+#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
+#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
+#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
+#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH
+#define SPRN_SPRG_VDSO_READ SPRN_USPRG7
+#define SPRN_SPRG_VDSO_WRITE SPRN_SPRG7
+
+#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
+#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
+
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
+#define SPRN_SPRG_603_LRU SPRN_SPRG4
+#endif
+
+#ifdef CONFIG_40x
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
+#define SPRN_SPRG_SCRATCH3 SPRN_SPRG4
+#define SPRN_SPRG_SCRATCH4 SPRN_SPRG5
+#define SPRN_SPRG_SCRATCH5 SPRN_SPRG6
+#define SPRN_SPRG_SCRATCH6 SPRN_SPRG7
+#endif
+
+#ifdef CONFIG_BOOKE
+#define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_RSCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_RSCRATCH_CRIT SPRN_SPRG2
+#define SPRN_SPRG_WSCRATCH_CRIT SPRN_SPRG2
+#define SPRN_SPRG_RSCRATCH2 SPRN_SPRG4R
+#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
+#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
+#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
+#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
+#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
+#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
+#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
+#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
+#endif
+
+
+
+/*
+ * An mtfsf instruction with the L bit set. On CPUs that support this, the
+ * full 64 bits of the FPSCR are restored; on other CPUs the L bit is ignored.
+ *
+ * Until binutils gets the new form of mtfsf, hardwire the instruction.
+ */
+#ifdef CONFIG_PPC64
+#define MTFSF_L(REG) \
+ .long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25))
+#else
+#define MTFSF_L(REG) mtfsf 0xff, (REG)
+#endif
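+/*
+ * Illustrative use in assembly (REG is an FPR number): after loading a
+ * saved FPSCR image into fr0, "MTFSF_L(0)" restores all 64 bits on CPUs
+ * that honour the L bit.
+ */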
+
+/* Processor Version Register (PVR) field extraction */
+
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
+
+#define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr))
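+/*
+ * Illustrative use:
+ *
+ *	if (pvr_version_is(PVR_POWER8) || pvr_version_is(PVR_POWER8E))
+ *		(POWER8-specific setup)
+ */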
+
+/*
+ * IBM has further subdivided the standard PowerPC 16-bit version and
+ * revision subfields of the PVR for the PowerPC 403s into the following:
+ */
+
+#define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */
+#define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */
+#define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
+/* Processor Version Numbers */
+
+#define PVR_403GA 0x00200000
+#define PVR_403GB 0x00200100
+#define PVR_403GC 0x00200200
+#define PVR_403GCX 0x00201400
+#define PVR_405GP 0x40110000
+#define PVR_476 0x11a52000
+#define PVR_476FPE 0x7ff50000
+#define PVR_STB03XXX 0x40310000
+#define PVR_NP405H 0x41410000
+#define PVR_NP405L 0x41610000
+#define PVR_601 0x00010000
+#define PVR_602 0x00050000
+#define PVR_603 0x00030000
+#define PVR_603e 0x00060000
+#define PVR_603ev 0x00070000
+#define PVR_603r 0x00071000
+#define PVR_604 0x00040000
+#define PVR_604e 0x00090000
+#define PVR_604r 0x000A0000
+#define PVR_620 0x00140000
+#define PVR_740 0x00080000
+#define PVR_750 PVR_740
+#define PVR_740P 0x10080000
+#define PVR_750P PVR_740P
+#define PVR_7400 0x000C0000
+#define PVR_7410 0x800C0000
+#define PVR_7450 0x80000000
+#define PVR_8540 0x80200000
+#define PVR_8560 0x80200000
+#define PVR_VER_E500V1 0x8020
+#define PVR_VER_E500V2 0x8021
+#define PVR_VER_E500MC 0x8023
+#define PVR_VER_E5500 0x8024
+#define PVR_VER_E6500 0x8040
+#define PVR_VER_7450 0x8000
+#define PVR_VER_7455 0x8001
+#define PVR_VER_7447 0x8002
+#define PVR_VER_7447A 0x8003
+#define PVR_VER_7448 0x8004
+
+/*
+ * For the 8xx processors, all of them report the same PVR family for
+ * the PowerPC core. The various versions of these processors must be
+ * differentiated by the version number in the Communication Processor
+ * Module (CPM).
+ */
+#define PVR_8xx 0x00500000
+
+#define PVR_8240 0x00810100
+#define PVR_8245 0x80811014
+#define PVR_8260 PVR_8240
+
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS 0x00052000
+
+/* 64-bit processors */
+#define PVR_NORTHSTAR 0x0033
+#define PVR_PULSAR 0x0034
+#define PVR_POWER4 0x0035
+#define PVR_ICESTAR 0x0036
+#define PVR_SSTAR 0x0037
+#define PVR_POWER4p 0x0038
+#define PVR_970 0x0039
+#define PVR_POWER5 0x003A
+#define PVR_POWER5p 0x003B
+#define PVR_970FX 0x003C
+#define PVR_POWER6 0x003E
+#define PVR_POWER7 0x003F
+#define PVR_630 0x0040
+#define PVR_630p 0x0041
+#define PVR_970MP 0x0044
+#define PVR_970GX 0x0045
+#define PVR_POWER7p 0x004A
+#define PVR_POWER8E 0x004B
+#define PVR_POWER8NVL 0x004C
+#define PVR_POWER8 0x004D
+#define PVR_POWER9 0x004E
+#define PVR_POWER10 0x0080
+#define PVR_BE 0x0070
+#define PVR_PA6T 0x0090
+
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204 0x0f000001
+#define PVR_ARCH_205 0x0f000002
+#define PVR_ARCH_206 0x0f000003
+#define PVR_ARCH_206p 0x0f100003
+#define PVR_ARCH_207 0x0f000004
+#define PVR_ARCH_300 0x0f000005
+#define PVR_ARCH_31 0x0f000006
+
+/* Macros for setting and retrieving special purpose registers */
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+typedef struct {
+ u32 val;
+#ifdef CONFIG_PPC64
+ u32 suffix;
+#endif
+} __packed ppc_inst_t;
+#else
+typedef u32 ppc_inst_t;
+#endif
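+/*
+ * ppc_inst_t holds a single instruction image: "val" is the first (or
+ * only) word, and on 64-bit "suffix" carries the second word of an
+ * ISA v3.1 prefixed instruction.
+ */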
+
+#define mfmsr() ({unsigned long rval; \
+ asm volatile("mfmsr %0" : "=r" (rval) : \
+ : "memory"); rval;})
+#ifdef CONFIG_PPC_BOOK3S_64
+#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v) : "memory")
+#define mtmsr(v) __mtmsrd((v), 0)
+#define __MTMSR "mtmsrd"
+#else
+#define mtmsr(v) asm volatile("mtmsr %0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+#define __mtmsrd(v, l) BUILD_BUG()
+#define __MTMSR "mtmsr"
+#endif
+
+static inline void mtmsr_isync(unsigned long val)
+{
+ asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+ "r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
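+/*
+ * Illustrative use: mtmsr_isync(mfmsr() | MSR_FP) sets MSR[FP] and
+ * appends the trailing isync only on CPUs older than ISA v2.06, where
+ * the mtmsr is not synchronizing on its own.
+ */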
+
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#ifndef mtspr
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+#endif
+#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
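+/*
+ * Illustrative use: the SPR number must be a compile-time constant, e.g.
+ *
+ *	unsigned long pvr = mfspr(SPRN_PVR);
+ *	mtspr(SPRN_SPRG3, val);
+ */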
+
+static inline void wrtee(unsigned long val)
+{
+ if (__builtin_constant_p(val))
+ asm volatile("wrteei %0" : : "i" ((val & MSR_EE) ? 1 : 0) : "memory");
+ else
+ asm volatile("wrtee %0" : : "r" (val) : "memory");
+}
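+/*
+ * Illustrative use (BookE): bracket a critical section by saving MSR
+ * and clearing/restoring MSR[EE]:
+ *
+ *	unsigned long msr = mfmsr();
+ *	wrtee(0);
+ *	(critical section)
+ *	wrtee(msr);
+ */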
+
+extern unsigned long msr_check_and_set(unsigned long bits);
+extern bool strict_msr_control;
+extern void __msr_check_and_clear(unsigned long bits);
+static inline void msr_check_and_clear(unsigned long bits)
+{
+ if (strict_msr_control)
+ __msr_check_and_clear(bits);
+}
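+/*
+ * Illustrative pairing: facility users call msr_check_and_set(MSR_FP)
+ * before touching the FPU and msr_check_and_clear(MSR_FP) afterwards;
+ * the clear is skipped unless strict_msr_control is enabled.
+ */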
+
+#ifdef CONFIG_PPC32
+static inline u32 mfsr(u32 idx)
+{
+ u32 val;
+
+ if (__builtin_constant_p(idx))
+ asm volatile("mfsr %0, %1" : "=r" (val): "i" (idx >> 28));
+ else
+ asm volatile("mfsrin %0, %1" : "=r" (val): "r" (idx));
+
+ return val;
+}
+
+static inline void mtsr(u32 val, u32 idx)
+{
+ if (__builtin_constant_p(idx))
+ asm volatile("mtsr %1, %0" : : "r" (val), "i" (idx >> 28));
+ else
+ asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
+}
+#endif
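+/*
+ * Illustrative use (32-bit hash MMU): the segment register covering an
+ * effective address is selected by the address's top nibble, so
+ * "mfsr(ea)" reads the SR for the 256MB segment containing ea.
+ */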
+
+extern unsigned long current_stack_frame(void);
+
+register unsigned long current_stack_pointer asm("r1");
+
+extern unsigned long scom970_read(unsigned int address);
+extern void scom970_write(unsigned int address, unsigned long value);
+
+struct pt_regs;
+
+extern void ppc_save_regs(struct pt_regs *regs);
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
new file mode 100644
index 000000000..299ee7be0
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains register definitions common to PowerPC 8xx CPUs.
+ */
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
+
+/* Cache control on the MPC8xx is provided through some additional
+ * special purpose registers.
+ */
+#define SPRN_IC_CST 560 /* Instruction cache control/status */
+#define SPRN_IC_ADR 561 /* Address needed for some commands */
+#define SPRN_IC_DAT 562 /* Read-only data register */
+#define SPRN_DC_CST 568 /* Data cache control/status */
+#define SPRN_DC_ADR 569 /* Address needed for some commands */
+#define SPRN_DC_DAT 570 /* Read-only data register */
+
+/* Misc Debug */
+#define SPRN_DPDR 630
+#define SPRN_MI_CAM 816
+#define SPRN_MI_RAM0 817
+#define SPRN_MI_RAM1 818
+#define SPRN_MD_CAM 824
+#define SPRN_MD_RAM0 825
+#define SPRN_MD_RAM1 826
+
+/* Special MSR manipulation registers */
+#define SPRN_EIE 80 /* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID 81 /* External interrupt disable (EE=0, RI=1) */
+#define SPRN_NRI 82 /* Non recoverable interrupt (EE=0, RI=0) */
+
+/* Debug registers */
+#define SPRN_CMPA 144
+#define SPRN_COUNTA 150
+#define SPRN_CMPE 152
+#define SPRN_CMPF 153
+#define SPRN_LCTRL1 156
+#define LCTRL1_CTE_GT 0xc0000000
+#define LCTRL1_CTF_LT 0x14000000
+#define LCTRL1_CRWE_RW 0x00000000
+#define LCTRL1_CRWE_RO 0x00040000
+#define LCTRL1_CRWE_WO 0x000c0000
+#define LCTRL1_CRWF_RW 0x00000000
+#define LCTRL1_CRWF_RO 0x00010000
+#define LCTRL1_CRWF_WO 0x00030000
+#define SPRN_LCTRL2 157
+#define LCTRL2_LW0EN 0x80000000
+#define LCTRL2_LW0LA_E 0x00000000
+#define LCTRL2_LW0LA_F 0x04000000
+#define LCTRL2_LW0LA_EandF 0x08000000
+#define LCTRL2_LW0LADC 0x02000000
+#define LCTRL2_SLW0EN 0x00000002
+#ifdef CONFIG_PPC_8xx
+#define SPRN_ICTRL 158
+#endif
+#define SPRN_BAR 159
+
+/* Commands. Only the first few are available to the instruction cache. */
+#define IDC_ENABLE 0x02000000 /* Cache enable */
+#define IDC_DISABLE 0x04000000 /* Cache disable */
+#define IDC_LDLCK 0x06000000 /* Load and lock */
+#define IDC_UNLINE 0x08000000 /* Unlock line */
+#define IDC_UNALL 0x0a000000 /* Unlock all */
+#define IDC_INVALL 0x0c000000 /* Invalidate all */
+
+#define DC_FLINE 0x0e000000 /* Flush data cache line */
+#define DC_SFWT 0x01000000 /* Set forced writethrough mode */
+#define DC_CFWT 0x03000000 /* Clear forced writethrough mode */
+#define DC_SLES 0x05000000 /* Set little endian swap mode */
+#define DC_CLES 0x07000000 /* Clear little endian swap mode */
+
+/* Status. */
+#define IDC_ENABLED 0x80000000 /* Cache is enabled */
+#define IDC_CERR1 0x00200000 /* Cache error 1 */
+#define IDC_CERR2 0x00100000 /* Cache error 2 */
+#define IDC_CERR3 0x00080000 /* Cache error 3 */
+
+#define DC_DFWT 0x40000000 /* Data cache is forced write through */
+#define DC_LES 0x20000000 /* Caches are little endian mode */
+
+#endif /* _ASM_POWERPC_REG_8xx_H */
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
new file mode 100644
index 000000000..74fba29e9
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Register definitions specific to the A2 core
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ */
+
+#ifndef __ASM_POWERPC_REG_A2_H__
+#define __ASM_POWERPC_REG_A2_H__
+
+#include <asm/asm-const.h>
+
+#define SPRN_TENSR 0x1b5
+#define SPRN_TENS 0x1b6 /* Thread ENable Set */
+#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
+
+#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */
+#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */
+#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */
+#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */
+#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */
+#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */
+#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */
+
+#define SPRN_IAR 0x372
+
+#define SPRN_IUCR0 0x3f3
+#define IUCR0_ICBI_ACK 0x1000
+
+#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */
+
+#define A2_IERAT_SIZE 16
+#define A2_DERAT_SIZE 32
+
+/* A2 MMUCR0 bits */
+#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */
+#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */
+#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */
+#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */
+#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */
+#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */
+#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */
+#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */
+#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */
+#define MMUCR0_TID_MASK 0x000000ff /* TID field */
+
+/* A2 MMUCR1 bits */
+#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */
+#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */
+#define MMUCR1_REE 0x20000000 /* Reference Exception Enable */
+#define MMUCR1_CEE 0x10000000 /* Change exception enable */
+#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */
+#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on CS evts except isync */
+#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */
+#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */
+#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */
+#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */
+#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */
+#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */
+#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */
+
+/* A2 MMUCR2 bits */
+#define MMUCR2_PSSEL_SHIFT 4
+
+/* A2 MMUCR3 bits */
+#define MMUCR3_THID 0x0000000f /* Thread ID */
+
+/* *** ERAT TLB bits definitions */
+#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000)
+#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00)
+#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000)
+#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400)
+#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800)
+#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00)
+#define TLB0_V ASM_CONST(0x0000000000000200)
+#define TLB0_X ASM_CONST(0x0000000000000100)
+#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0)
+#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010)
+#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030)
+#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050)
+#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070)
+#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0)
+#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f)
+#define TLB0_THDID_0 ASM_CONST(0x0000000000000001)
+#define TLB0_THDID_1 ASM_CONST(0x0000000000000002)
+#define TLB0_THDID_2 ASM_CONST(0x0000000000000004)
+#define TLB0_THDID_3 ASM_CONST(0x0000000000000008)
+#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f)
+
+#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000)
+#define TLB1_U0 ASM_CONST(0x0008000000000000)
+#define TLB1_U1 ASM_CONST(0x0004000000000000)
+#define TLB1_U2 ASM_CONST(0x0002000000000000)
+#define TLB1_U3 ASM_CONST(0x0001000000000000)
+#define TLB1_R ASM_CONST(0x0000800000000000)
+#define TLB1_C ASM_CONST(0x0000400000000000)
+#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000)
+#define TLB1_W ASM_CONST(0x0000000000000800)
+#define TLB1_I ASM_CONST(0x0000000000000400)
+#define TLB1_M ASM_CONST(0x0000000000000200)
+#define TLB1_G ASM_CONST(0x0000000000000100)
+#define TLB1_E ASM_CONST(0x0000000000000080)
+#define TLB1_VF ASM_CONST(0x0000000000000040)
+#define TLB1_UX ASM_CONST(0x0000000000000020)
+#define TLB1_SX ASM_CONST(0x0000000000000010)
+#define TLB1_UW ASM_CONST(0x0000000000000008)
+#define TLB1_SW ASM_CONST(0x0000000000000004)
+#define TLB1_UR ASM_CONST(0x0000000000000002)
+#define TLB1_SR ASM_CONST(0x0000000000000001)
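+
+/*
+ * Illustrative sketch (not kernel code): word 0 (TLB0_*) holds the
+ * effective page number, validity, size and thread mask, while word 1
+ * (TLB1_*) holds the real page number plus the WIMGE and permission
+ * bits.  A hypothetical valid 4K kernel mapping for all threads could
+ * be composed as:
+ *
+ *	u64 w0 = (ea & TLB0_EPN_MASK) | TLB0_V | TLB0_SIZE_4K |
+ *		 TLB0_THDID_ALL;
+ *	u64 w1 = (pa & TLB1_RPN_MASK) | TLB1_M | TLB1_SX | TLB1_SW |
+ *		 TLB1_SR;
+ *
+ * where "ea" and "pa" stand in for the effective and real addresses.
+ */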
+
+/* A2 erativax attributes definitions */
+#define ERATIVAX_RS_IS_ALL 0x000
+#define ERATIVAX_RS_IS_TID 0x040
+#define ERATIVAX_RS_IS_CLASS 0x080
+#define ERATIVAX_RS_IS_FULLMATCH 0x0c0
+#define ERATIVAX_CLASS_00 0x000
+#define ERATIVAX_CLASS_01 0x010
+#define ERATIVAX_CLASS_10 0x020
+#define ERATIVAX_CLASS_11 0x030
+#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1)
+#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1)
+#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1)
+#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1)
+#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1)
+
+/* A2 eratilx attributes definitions */
+#define ERATILX_T_ALL 0
+#define ERATILX_T_TID 1
+#define ERATILX_T_TGS 2
+#define ERATILX_T_FULLMATCH 3
+#define ERATILX_T_CLASS0 4
+#define ERATILX_T_CLASS1 5
+#define ERATILX_T_CLASS2 6
+#define ERATILX_T_CLASS3 7
+
+/* XUCR0 bits */
+#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */
+#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */
+#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */
+#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */
+
+/* A2 CCR0 register */
+#define A2_CCR0_PME_DISABLED 0x00000000
+#define A2_CCR0_PME_SLEEP 0x40000000
+#define A2_CCR0_PME_RVW 0x80000000
+#define A2_CCR0_PME_DISABLED2 0xc0000000
+
+/* A2 CCR2 register */
+#define A2_CCR2_ERAT_ONLY_MODE 0x00000001
+#define A2_CCR2_ENABLE_ICSWX 0x00000002
+#define A2_CCR2_ENABLE_PC 0x20000000
+#define A2_CCR2_ENABLE_TRACE 0x40000000
+
+#endif /* __ASM_POWERPC_REG_A2_H__ */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
new file mode 100644
index 000000000..af56980b6
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Contains register definitions common to the Book E PowerPC
+ * specification. Notice that while the IBM-40x series of CPUs
+ * are not true Book E PowerPCs, they borrowed a number of features
+ * before Book E was finalized, and are included here as well. Unfortunately,
+ * they sometimes used different locations than true Book E CPUs did.
+ *
+ * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_BOOKE_H__
+#define __ASM_POWERPC_REG_BOOKE_H__
+
+#include <asm/ppc-opcode.h>
+
+/* Machine State Register (MSR) Fields */
+#define MSR_GS_LG 28 /* Guest state */
+#define MSR_UCLE_LG 26 /* User-mode cache lock enable */
+#define MSR_SPE_LG 25 /* Enable SPE */
+#define MSR_DWE_LG 10 /* Debug Wait Enable */
+#define MSR_UBLE_LG 10 /* BTB lock enable (e500) */
+#define MSR_IS_LG MSR_IR_LG /* Instruction Space */
+#define MSR_DS_LG MSR_DR_LG /* Data Space */
+#define MSR_PMM_LG 2 /* Performance monitor mark bit */
+#define MSR_CM_LG 31 /* Computation Mode (0=32-bit, 1=64-bit) */
+
+#define MSR_GS __MASK(MSR_GS_LG)
+#define MSR_UCLE __MASK(MSR_UCLE_LG)
+#define MSR_SPE __MASK(MSR_SPE_LG)
+#define MSR_DWE __MASK(MSR_DWE_LG)
+#define MSR_UBLE __MASK(MSR_UBLE_LG)
+#define MSR_IS __MASK(MSR_IS_LG)
+#define MSR_DS __MASK(MSR_DS_LG)
+#define MSR_PMM __MASK(MSR_PMM_LG)
+#define MSR_CM __MASK(MSR_CM_LG)
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+#define MSR_64BIT MSR_CM
+
+#define MSR_ (MSR_ME | MSR_RI | MSR_CE)
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
+#elif defined(CONFIG_40x)
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#else
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE)
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
+
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */
+#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */
+#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */
+#define SPRN_SPRG3R 0x103 /* Special Purpose Register General 3 Read */
+#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */
+#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */
+#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */
+#define SPRN_SPRG7R 0x107 /* Special Purpose Register General 7 Read */
+#define SPRN_SPRG4W 0x114 /* Special Purpose Register General 4 Write */
+#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */
+#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */
+#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
+#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
+#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_DBCR4 0x233 /* Debug Control Register 4 */
+#define SPRN_MSRP 0x137 /* MSR Protect Register */
+#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
+#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
+#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
+#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_LPID 0x152 /* Logical Partition ID */
+#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
+#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
+#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
+#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
+#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
+#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
+#define SPRN_GSPRG0 0x170 /* Guest SPRG0 */
+#define SPRN_GSPRG1 0x171 /* Guest SPRG1 */
+#define SPRN_GSPRG2 0x172 /* Guest SPRG2 */
+#define SPRN_GSPRG3 0x173 /* Guest SPRG3 */
+#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
+#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0 0x17A /* Guest SRR0 */
+#define SPRN_GSRR1 0x17B /* Guest SRR1 */
+#define SPRN_GEPR 0x17C /* Guest EPR */
+#define SPRN_GDEAR 0x17D /* Guest DEAR */
+#define SPRN_GPIR 0x17E /* Guest PIR */
+#define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */
+#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
+#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
+#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
+#define SPRN_IVOR3 0x193 /* Interrupt Vector Offset Register 3 */
+#define SPRN_IVOR4 0x194 /* Interrupt Vector Offset Register 4 */
+#define SPRN_IVOR5 0x195 /* Interrupt Vector Offset Register 5 */
+#define SPRN_IVOR6 0x196 /* Interrupt Vector Offset Register 6 */
+#define SPRN_IVOR7 0x197 /* Interrupt Vector Offset Register 7 */
+#define SPRN_IVOR8 0x198 /* Interrupt Vector Offset Register 8 */
+#define SPRN_IVOR9 0x199 /* Interrupt Vector Offset Register 9 */
+#define SPRN_IVOR10 0x19A /* Interrupt Vector Offset Register 10 */
+#define SPRN_IVOR11 0x19B /* Interrupt Vector Offset Register 11 */
+#define SPRN_IVOR12 0x19C /* Interrupt Vector Offset Register 12 */
+#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
+#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
+#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
+#define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */
+#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
+#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
+#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */
+#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
+#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
+#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
+#define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */
+#define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */
+#define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */
+#define SPRN_GIVPR 0x1BF /* Guest IVPR */
+#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
+#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
+#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
+#define SPRN_L1CFG0 0x203 /* L1 Cache Configure Register 0 */
+#define SPRN_L1CFG1 0x204 /* L1 Cache Configure Register 1 */
+#define SPRN_ATB 0x20E /* Alternate Time Base */
+#define SPRN_ATBL 0x20E /* Alternate Time Base Lower */
+#define SPRN_ATBU 0x20F /* Alternate Time Base Upper */
+#define SPRN_IVOR32 0x210 /* Interrupt Vector Offset Register 32 */
+#define SPRN_IVOR33 0x211 /* Interrupt Vector Offset Register 33 */
+#define SPRN_IVOR34 0x212 /* Interrupt Vector Offset Register 34 */
+#define SPRN_IVOR35 0x213 /* Interrupt Vector Offset Register 35 */
+#define SPRN_IVOR36 0x214 /* Interrupt Vector Offset Register 36 */
+#define SPRN_IVOR37 0x215 /* Interrupt Vector Offset Register 37 */
+#define SPRN_MCARU 0x239 /* Machine Check Address Register Upper */
+#define SPRN_MCSRR0 0x23A /* Machine Check Save and Restore Register 0 */
+#define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */
+#define SPRN_MCSR 0x23C /* Machine Check Status Register */
+#define SPRN_MCAR 0x23D /* Machine Check Address Register */
+#define SPRN_DSRR0 0x23E /* Debug Save and Restore Register 0 */
+#define SPRN_DSRR1 0x23F /* Debug Save and Restore Register 1 */
+#define SPRN_SPRG8 0x25C /* Special Purpose Register General 8 */
+#define SPRN_SPRG9 0x25D /* Special Purpose Register General 9 */
+#define SPRN_L1CSR2 0x25E /* L1 Cache Control and Status Register 2 */
+#define SPRN_MAS0 0x270 /* MMU Assist Register 0 */
+#define SPRN_MAS1 0x271 /* MMU Assist Register 1 */
+#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
+#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
+#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
+#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */
+#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
+#define SPRN_PID1 0x279 /* Process ID Register 1 */
+#define SPRN_PID2 0x27A /* Process ID Register 2 */
+#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
+#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
+#define SPRN_TLB2CFG 0x2B2 /* TLB 2 Config Register */
+#define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */
+#define SPRN_EPR 0x2BE /* External Proxy Register */
+#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */
+#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
+#define SPRN_MAS7 0x3B0 /* MMU Assist Register 7 */
+#define SPRN_MMUCR 0x3B2 /* MMU Control Register */
+#define SPRN_CCR0 0x3B3 /* Core Configuration Register 0 */
+#define SPRN_EPLC 0x3B3 /* External Process ID Load Context */
+#define SPRN_EPSC 0x3B4 /* External Process ID Store Context */
+#define SPRN_SGR 0x3B9 /* Storage Guarded Register */
+#define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */
+#define SPRN_SLER 0x3BB /* Little-endian real mode */
+#define SPRN_SU0R 0x3BC /* "User 0" real mode (40x) */
+#define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */
+#define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */
+#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */
+#define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */
+#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */
+#define SPRN_MMUCFG 0x3F7 /* MMU Configuration Register */
+#define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */
+#define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */
+#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
+#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
+#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
+#define SPRN_SVR 0x3FF /* System Version Register */
+
+/*
+ * SPRs which have conflicting definitions on true Book E versus classic,
+ * or IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */
+#define SPRN_DEAR 0x03D /* Data Error Address Register */
+#define SPRN_ESR 0x03E /* Exception Syndrome Register */
+#define SPRN_PIR 0x11E /* Processor Identification Register */
+#define SPRN_DBSR 0x130 /* Debug Status Register */
+#define SPRN_DBCR0 0x134 /* Debug Control Register 0 */
+#define SPRN_DBCR1 0x135 /* Debug Control Register 1 */
+#define SPRN_IAC1 0x138 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x139 /* Instruction Address Compare 2 */
+#define SPRN_DAC1 0x13C /* Data Address Compare 1 */
+#define SPRN_DAC2 0x13D /* Data Address Compare 2 */
+#define SPRN_TSR 0x150 /* Timer Status Register */
+#define SPRN_TCR 0x154 /* Timer Control Register */
+#endif /* Book E */
+#ifdef CONFIG_40x
+#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
+#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
+#define SPRN_DEAR 0x3D5 /* Data Error Address Register */
+#define SPRN_TSR 0x3D8 /* Timer Status Register */
+#define SPRN_TCR 0x3DA /* Timer Control Register */
+#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */
+#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */
+#define SPRN_DBSR 0x3F0 /* Debug Status Register */
+#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */
+#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */
+#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */
+#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
+#endif
+#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */
+
+/* Bit definitions for CCR1. */
+#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
+#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT 8
+#define PWRMGTCR0_PW20_ENT 0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
+#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
+
+/* Bit definitions for the MCSR. */
+#define MCSR_MCS 0x80000000 /* Machine Check Summary */
+#define MCSR_IB 0x40000000 /* Instruction PLB Error */
+#define MCSR_DRB 0x20000000 /* Data Read PLB Error */
+#define MCSR_DWB 0x10000000 /* Data Write PLB Error */
+#define MCSR_TLBP 0x08000000 /* TLB Parity Error */
+#define MCSR_ICP 0x04000000 /* I-Cache Parity Error */
+#define MCSR_DCSP 0x02000000 /* D-Cache Search Parity Error */
+#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
+#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
+
+#define PPC47x_MCSR_GPR 0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
+
+#ifdef CONFIG_PPC_E500
+/* All e500 */
+#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
+#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
+
+/* e500v1/v2 */
+#define MCSR_DCP_PERR 0x20000000UL /* D-Cache Push Parity Error */
+#define MCSR_DCPERR 0x10000000UL /* D-Cache Parity Error */
+#define MCSR_BUS_IAERR 0x00000080UL /* Instruction Address Error */
+#define MCSR_BUS_RAERR 0x00000040UL /* Read Address Error */
+#define MCSR_BUS_WAERR 0x00000020UL /* Write Address Error */
+#define MCSR_BUS_IBERR 0x00000010UL /* Instruction Data Error */
+#define MCSR_BUS_RBERR 0x00000008UL /* Read Data Bus Error */
+#define MCSR_BUS_WBERR 0x00000004UL /* Write Data Bus Error */
+#define MCSR_BUS_IPERR 0x00000002UL /* Instruction parity Error */
+#define MCSR_BUS_RPERR 0x00000001UL /* Read parity Error */
+
+/* e500mc */
+#define MCSR_DCPERR_MC 0x20000000UL /* D-Cache Parity Error */
+#define MCSR_L2MMU_MHIT 0x08000000UL /* Hit on multiple TLB entries */
+#define MCSR_NMI 0x00100000UL /* Non-Maskable Interrupt */
+#define MCSR_MAV 0x00080000UL /* MCAR address valid */
+#define MCSR_MEA 0x00040000UL /* MCAR is effective address */
+#define MCSR_IF 0x00010000UL /* Instruction Fetch */
+#define MCSR_LD 0x00008000UL /* Load */
+#define MCSR_ST 0x00004000UL /* Store */
+#define MCSR_LDG 0x00002000UL /* Guarded Load */
+#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
+#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP 0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
+#endif
+
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_PPC_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
+#define HID1_RFXE 0x00020000 /* Read fault exception enable */
+#define HID1_R1DPE 0x00008000 /* R1 data bus parity enable */
+#define HID1_R2DPE 0x00004000 /* R2 data bus parity enable */
+#define HID1_ASTME 0x00002000 /* Address bus streaming mode enable */
+#define HID1_ABE 0x00001000 /* Address broadcast enable */
+#define HID1_MPXTT 0x00000400 /* MPX re-map transfer type */
+#define HID1_ATS 0x00000080 /* Atomic status */
+#define HID1_MID_MASK 0x0000000f /* MID input pins */
+#endif
+
+/* Bit definitions for the DBSR. */
+/*
+ * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define DBSR_IDE 0x80000000 /* Imprecise Debug Event */
+#define DBSR_MRR 0x30000000 /* Most Recent Reset */
+#define DBSR_IC 0x08000000 /* Instruction Completion */
+#define DBSR_BT 0x04000000 /* Branch Taken */
+#define DBSR_IRPT 0x02000000 /* Exception Debug Event */
+#define DBSR_TIE 0x01000000 /* Trap Instruction Event */
+#define DBSR_IAC1 0x00800000 /* Instr Address Compare 1 Event */
+#define DBSR_IAC2 0x00400000 /* Instr Address Compare 2 Event */
+#define DBSR_IAC3 0x00200000 /* Instr Address Compare 3 Event */
+#define DBSR_IAC4 0x00100000 /* Instr Address Compare 4 Event */
+#define DBSR_DAC1R 0x00080000 /* Data Addr Compare 1 Read Event */
+#define DBSR_DAC1W 0x00040000 /* Data Addr Compare 1 Write Event */
+#define DBSR_DAC2R 0x00020000 /* Data Addr Compare 2 Read Event */
+#define DBSR_DAC2W 0x00010000 /* Data Addr Compare 2 Write Event */
+#define DBSR_RET 0x00008000 /* Return Debug Event */
+#define DBSR_CIRPT 0x00000040 /* Critical Interrupt Taken Event */
+#define DBSR_CRET 0x00000020 /* Critical Return Debug Event */
+#define DBSR_IAC12ATS 0x00000002 /* Instr Address Compare 1/2 Toggle */
+#define DBSR_IAC34ATS 0x00000001 /* Instr Address Compare 3/4 Toggle */
+#endif
+#ifdef CONFIG_40x
+#define DBSR_IC 0x80000000 /* Instruction Completion */
+#define DBSR_BT 0x40000000 /* Branch taken */
+#define DBSR_IRPT 0x20000000 /* Exception Debug Event */
+#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */
+#define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */
+#define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */
+#define DBSR_IAC3 0x00080000 /* Instruction Address Compare 3 Event */
+#define DBSR_IAC4 0x00040000 /* Instruction Address Compare 4 Event */
+#define DBSR_DAC1R 0x01000000 /* Data Address Compare 1 Read Event */
+#define DBSR_DAC1W 0x00800000 /* Data Address Compare 1 Write Event */
+#define DBSR_DAC2R 0x00400000 /* Data Address Compare 2 Read Event */
+#define DBSR_DAC2W 0x00200000 /* Data Address Compare 2 Write Event */
+#endif
+
+/* Bit definitions related to the ESR. */
+#define ESR_MCI 0x80000000 /* Machine Check - Instruction */
+#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */
+#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */
+#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */
+#define ESR_IMCT 0x10000000 /* Instr. Machine Check - Timeout */
+#define ESR_PIL 0x08000000 /* Program Exception - Illegal */
+#define ESR_PPR 0x04000000 /* Program Exception - Privileged */
+#define ESR_PTR 0x02000000 /* Program Exception - Trap */
+#define ESR_FP 0x01000000 /* Floating Point Operation */
+#define ESR_DST 0x00800000 /* Storage Exception - Data miss */
+#define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */
+#define ESR_ST 0x00800000 /* Store Operation */
+#define ESR_DLK 0x00200000 /* Data Cache Locking */
+#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
+#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
+#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_SPV 0x00000080 /* Signal Processing operation */
+
+/* Bit definitions related to the DBCR0. */
+#if defined(CONFIG_40x)
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST 0x30000000 /* all the bits in the RST field */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */
+#define DBCR0_RST_CORE 0x10000000 /* Core Reset */
+#define DBCR0_RST_NONE 0x00000000 /* No Reset */
+#define DBCR0_IC 0x08000000 /* Instruction Completion */
+#define DBCR0_ICMP DBCR0_IC
+#define DBCR0_BT 0x04000000 /* Branch Taken */
+#define DBCR0_BRT DBCR0_BT
+#define DBCR0_EDE 0x02000000 /* Exception Debug Event */
+#define DBCR0_IRPT DBCR0_EDE
+#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */
+#define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */
+#define DBCR0_IAC1 DBCR0_IA1
+#define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */
+#define DBCR0_IAC2 DBCR0_IA2
+#define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */
+#define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */
+#define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */
+#define DBCR0_IAC3 DBCR0_IA3
+#define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */
+#define DBCR0_IAC4 DBCR0_IA4
+#define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */
+#define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */
+#define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */
+#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0)
+#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */
+#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */
+#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */
+#define DBCR_IAC34I DBCR0_IA34 /* Range Inclusive */
+#define DBCR_IAC34X (DBCR0_IA34 | DBCR0_IA34X) /* Range Exclusive */
+#define DBCR_IAC34MODE (DBCR0_IA34 | DBCR0_IA34X) /* IAC 3-4 Mode Bits */
+
+/* Bit definitions related to the DBCR1. */
+#define DBCR1_DAC1R 0x80000000 /* DAC1 Read Debug Event */
+#define DBCR1_DAC2R 0x40000000 /* DAC2 Read Debug Event */
+#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */
+#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */
+
+#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
+#define DBCR_DAC1R DBCR1_DAC1R
+#define DBCR_DAC1W DBCR1_DAC1W
+#define DBCR_DAC2R DBCR1_DAC2R
+#define DBCR_DAC2W DBCR1_DAC2W
+
+/*
+ * Are there any active Debug Events represented in the
+ * Debug Control Registers?
+ */
+#define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \
+ DBCR0_IAC3 | DBCR0_IAC4)
+#define DBCR1_ACTIVE_EVENTS (DBCR1_DAC1R | DBCR1_DAC2R | \
+ DBCR1_DAC1W | DBCR1_DAC2W)
+#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \
+ ((dbcr1) & DBCR1_ACTIVE_EVENTS))
+
+#elif defined(CONFIG_BOOKE)
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST 0x30000000 /* all the bits in the RST field */
+/* DBCR0_RST_* is 44x specific and not implemented on Freescale BookE */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */
+#define DBCR0_RST_CORE 0x10000000 /* Core Reset */
+#define DBCR0_RST_NONE 0x00000000 /* No Reset */
+#define DBCR0_ICMP 0x08000000 /* Instruction Completion */
+#define DBCR0_IC DBCR0_ICMP
+#define DBCR0_BRT 0x04000000 /* Branch Taken */
+#define DBCR0_BT DBCR0_BRT
+#define DBCR0_IRPT 0x02000000 /* Exception Debug Event */
+#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */
+#define DBCR0_TIE DBCR0_TDE
+#define DBCR0_IAC1 0x00800000 /* Instr Addr compare 1 enable */
+#define DBCR0_IAC2 0x00400000 /* Instr Addr compare 2 enable */
+#define DBCR0_IAC3 0x00200000 /* Instr Addr compare 3 enable */
+#define DBCR0_IAC4 0x00100000 /* Instr Addr compare 4 enable */
+#define DBCR0_DAC1R 0x00080000 /* DAC 1 Read enable */
+#define DBCR0_DAC1W 0x00040000 /* DAC 1 Write enable */
+#define DBCR0_DAC2R 0x00020000 /* DAC 2 Read enable */
+#define DBCR0_DAC2W 0x00010000 /* DAC 2 Write enable */
+#define DBCR0_RET 0x00008000 /* Return Debug Event */
+#define DBCR0_CIRPT 0x00000040 /* Critical Interrupt Taken Event */
+#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+
+#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
+#define DBCR_DAC1R DBCR0_DAC1R
+#define DBCR_DAC1W DBCR0_DAC1W
+#define DBCR_DAC2R DBCR0_DAC2R
+#define DBCR_DAC2W DBCR0_DAC2W
+
+/* Bit definitions related to the DBCR1. */
+#define DBCR1_IAC1US 0xC0000000 /* Instr Addr Cmp 1 Sup/User */
+#define DBCR1_IAC1ER 0x30000000 /* Instr Addr Cmp 1 Eff/Real */
+#define DBCR1_IAC1ER_01 0x10000000 /* reserved */
+#define DBCR1_IAC1ER_10 0x20000000 /* Instr Addr Cmp 1 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC1ER_11 0x30000000 /* Instr Addr Cmp 1 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC2US 0x0C000000 /* Instr Addr Cmp 2 Sup/User */
+#define DBCR1_IAC2ER 0x03000000 /* Instr Addr Cmp 2 Eff/Real */
+#define DBCR1_IAC2ER_01 0x01000000 /* reserved */
+#define DBCR1_IAC2ER_10 0x02000000 /* Instr Addr Cmp 2 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC2ER_11 0x03000000 /* Instr Addr Cmp 2 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC12M 0x00800000 /* Instr Addr 1-2 range enable */
+#define DBCR1_IAC12MX 0x00C00000 /* Instr Addr 1-2 range eXclusive */
+#define DBCR1_IAC12AT 0x00010000 /* Instr Addr 1-2 range Toggle */
+#define DBCR1_IAC3US 0x0000C000 /* Instr Addr Cmp 3 Sup/User */
+#define DBCR1_IAC3ER 0x00003000 /* Instr Addr Cmp 3 Eff/Real */
+#define DBCR1_IAC3ER_01 0x00001000 /* reserved */
+#define DBCR1_IAC3ER_10 0x00002000 /* Instr Addr Cmp 3 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC3ER_11 0x00003000 /* Instr Addr Cmp 3 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC4US 0x00000C00 /* Instr Addr Cmp 4 Sup/User */
+#define DBCR1_IAC4ER 0x00000300 /* Instr Addr Cmp 4 Eff/Real */
+#define DBCR1_IAC4ER_01 0x00000100 /* reserved */
+#define DBCR1_IAC4ER_10 0x00000200 /* Instr Addr Cmp 4 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC4ER_11 0x00000300 /* Instr Addr Cmp 4 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC34M 0x00000080 /* Instr Addr 3-4 range enable */
+#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
+#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
+
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
+#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */
+#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */
+#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */
+#define DBCR_IAC34I DBCR1_IAC34M /* Range Inclusive */
+#define DBCR_IAC34X DBCR1_IAC34MX /* Range Exclusive */
+#define DBCR_IAC34MODE DBCR1_IAC34MX /* IAC 3-4 Mode Bits */
+
+/* Bit definitions related to the DBCR2. */
+#define DBCR2_DAC1US 0xC0000000 /* Data Addr Cmp 1 Sup/User */
+#define DBCR2_DAC1ER 0x30000000 /* Data Addr Cmp 1 Eff/Real */
+#define DBCR2_DAC2US 0x0C000000 /* Data Addr Cmp 2 Sup/User */
+#define DBCR2_DAC2ER 0x03000000 /* Data Addr Cmp 2 Eff/Real */
+#define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */
+#define DBCR2_DAC12MM 0x00400000 /* DAC 1-2 Mask mode */
+#define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */
+#define DBCR2_DAC12MODE 0x00C00000 /* DAC 1-2 Mode Bits */
+#define DBCR2_DAC12A 0x00200000 /* DAC 1-2 Asynchronous */
+#define DBCR2_DVC1M 0x000C0000 /* Data Value Comp 1 Mode */
+#define DBCR2_DVC1M_SHIFT 18 /* # of bits to shift DBCR2_DVC1M */
+#define DBCR2_DVC2M 0x00030000 /* Data Value Comp 2 Mode */
+#define DBCR2_DVC2M_SHIFT 16 /* # of bits to shift DBCR2_DVC2M */
+#define DBCR2_DVC1BE 0x00000F00 /* Data Value Comp 1 Byte */
+#define DBCR2_DVC1BE_SHIFT 8 /* # of bits to shift DBCR2_DVC1BE */
+#define DBCR2_DVC2BE 0x0000000F /* Data Value Comp 2 Byte */
+#define DBCR2_DVC2BE_SHIFT 0 /* # of bits to shift DBCR2_DVC2BE */
+
+/*
+ * Are there any active Debug Events represented in the
+ * Debug Control Registers?
+ */
+#define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \
+ DBCR0_IAC3 | DBCR0_IAC4 | DBCR0_DAC1R | \
+ DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)
+#define DBCR1_ACTIVE_EVENTS 0
+
+#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \
+ ((dbcr1) & DBCR1_ACTIVE_EVENTS))
+#endif /* #elif defined(CONFIG_BOOKE) */
+
+/* Bit definitions related to the TCR. */
+#define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */
+#define TCR_WP_MASK TCR_WP(3)
+#define WP_2_17 0 /* 2^17 clocks */
+#define WP_2_21 1 /* 2^21 clocks */
+#define WP_2_25 2 /* 2^25 clocks */
+#define WP_2_29 3 /* 2^29 clocks */
+#define TCR_WRC(x) (((x)&0x3)<<28) /* WDT Reset Control */
+#define TCR_WRC_MASK TCR_WRC(3)
+#define WRC_NONE 0 /* No reset will occur */
+#define WRC_CORE 1 /* Core reset will occur */
+#define WRC_CHIP 2 /* Chip reset will occur */
+#define WRC_SYSTEM 3 /* System reset will occur */
+#define TCR_WIE 0x08000000 /* WDT Interrupt Enable */
+#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */
+#define TCR_DIE TCR_PIE /* DEC Interrupt Enable */
+#define TCR_FP(x) (((x)&0x3)<<24) /* FIT Period */
+#define TCR_FP_MASK TCR_FP(3)
+#define FP_2_9 0 /* 2^9 clocks */
+#define FP_2_13 1 /* 2^13 clocks */
+#define FP_2_17 2 /* 2^17 clocks */
+#define FP_2_21 3 /* 2^21 clocks */
+#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
+#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+
+#ifdef CONFIG_PPC_E500
+#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \
+ (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr) (((tcr) & 0xC0000000) >> 30)
+#endif
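+
+/*
+ * Worked example: on e500 the watchdog period is a 6-bit value whose
+ * low two bits come from TCR[WP] and whose upper four bits come from
+ * TCR[WPEXT].  With tcr = 0x400A0000 (WP = 0b01, WPEXT = 0b0101),
+ * TCR_GET_WP(tcr) = (0x40000000 >> 30) | (0xA0000 >> 15)
+ *                 = 0x01 | 0x14 = 21.
+ */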
+
+/* Bit definitions for the TSR. */
+#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
+#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
+#define TSR_WRS(x) (((x)&0x3)<<28) /* WDT Reset Status */
+#define WRS_NONE 0 /* No WDT reset occurred */
+#define WRS_CORE 1 /* WDT forced core reset */
+#define WRS_CHIP 2 /* WDT forced chip reset */
+#define WRS_SYSTEM 3 /* WDT forced system reset */
+#define TSR_PIS 0x08000000 /* PIT Interrupt Status */
+#define TSR_DIS TSR_PIS /* DEC Interrupt Status */
+#define TSR_FIS 0x04000000 /* FIT Interrupt Status */
+
+/* Bit definitions for the DCCR. */
+#define DCCR_NOCACHE 0 /* Noncacheable */
+#define DCCR_CACHE 1 /* Cacheable */
+
+/* Bit definitions for DCWR. */
+#define DCWR_COPY 0 /* Copy-back */
+#define DCWR_WRITE 1 /* Write-through */
+
+/* Bit definitions for ICCR. */
+#define ICCR_NOCACHE 0 /* Noncacheable */
+#define ICCR_CACHE 1 /* Cacheable */
+
+/* Bit definitions for L1CSR0. */
+#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* Data Cache Unable to Lock */
+#define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+
+/* Bit definitions for L1CSR1. */
+#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
+#define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */
+
+/* Bit definitions for L1CSR2. */
+#define L1CSR2_DCWS 0x40000000 /* Data Cache write shadow */
+
+/* Bit definitions for BUCSR. */
+#define BUCSR_STAC_EN 0x01000000 /* Segment Target Address Cache */
+#define BUCSR_LS_EN 0x00400000 /* Link Stack */
+#define BUCSR_BBFI 0x00000200 /* Branch Buffer flash invalidate */
+#define BUCSR_BPEN 0x00000001 /* Branch prediction enable */
+#define BUCSR_INIT (BUCSR_STAC_EN | BUCSR_LS_EN | BUCSR_BBFI | BUCSR_BPEN)
+
+/* Bit definitions for L2CSR0. */
+#define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */
+#define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity/ECC Enable */
+#define L2CSR0_L2WP 0x1c000000 /* L2 I/D Way Partitioning */
+#define L2CSR0_L2CM 0x03000000 /* L2 Cache Coherency Mode */
+#define L2CSR0_L2FI 0x00200000 /* L2 Cache Flash Invalidate */
+#define L2CSR0_L2IO 0x00100000 /* L2 Cache Instruction Only */
+#define L2CSR0_L2DO 0x00010000 /* L2 Cache Data Only */
+#define L2CSR0_L2REP 0x00003000 /* L2 Line Replacement Algo */
+#define L2CSR0_L2FL 0x00000800 /* L2 Cache Flush */
+#define L2CSR0_L2LFC 0x00000400 /* L2 Cache Lock Flash Clear */
+#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */
+#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */
+
+/* Bit definitions for SGR. */
+#define SGR_NORMAL 0 /* Speculative fetching allowed. */
+#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
+
+/* Bit definitions for EPCR */
+#define SPRN_EPCR_EXTGS 0x80000000 /* External Input interrupt
+ * directed to Guest state */
+#define SPRN_EPCR_DTLBGS 0x40000000 /* Data TLB Error interrupt
+ * directed to guest state */
+#define SPRN_EPCR_ITLBGS 0x20000000 /* Instr. TLB error interrupt
+ * directed to guest state */
+#define SPRN_EPCR_DSIGS 0x10000000 /* Data Storage interrupt
+ * directed to guest state */
+#define SPRN_EPCR_ISIGS 0x08000000 /* Instr. Storage interrupt
+ * directed to guest state */
+#define SPRN_EPCR_DUVD 0x04000000 /* Disable Hypervisor Debug */
+#define SPRN_EPCR_ICM 0x02000000 /* Interrupt computation mode
+ * (copied to MSR:CM on intr) */
+#define SPRN_EPCR_GICM 0x01000000 /* Guest Interrupt Comp. mode */
+#define SPRN_EPCR_DGTMI 0x00800000 /* Disable TLB Guest Management
+ * instructions */
+#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
+ * for hypervisor */
+
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT 31
+#define EPC_EAS 0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT 30
+#define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT 29
+#define EPC_ELPID 0x00ff0000
+#define EPC_ELPID_SHIFT 16
+#define EPC_EPID 0x00003fff
+#define EPC_EPID_SHIFT 0
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR 830
+#define SPRN_USPCR 831
+#define SPRN_ISPCR 829
+#define SPRN_MMUBE0 820
+#define MMUBE0_IBE0_SHIFT 24
+#define MMUBE0_IBE1_SHIFT 16
+#define MMUBE0_IBE2_SHIFT 8
+#define MMUBE0_VBE0 0x00000004
+#define MMUBE0_VBE1 0x00000002
+#define MMUBE0_VBE2 0x00000001
+#define SPRN_MMUBE1 821
+#define MMUBE1_IBE3_SHIFT 24
+#define MMUBE1_IBE4_SHIFT 16
+#define MMUBE1_IBE5_SHIFT 8
+#define MMUBE1_VBE3 0x00000004
+#define MMUBE1_VBE4 0x00000002
+#define MMUBE1_VBE5 0x00000001
+
+#define TMRN_TMCFG0 16 /* Thread Management Configuration Register 0 */
+#define TMRN_TMCFG0_NPRIBITS 0x003f0000 /* Bits of thread priority */
+#define TMRN_TMCFG0_NPRIBITS_SHIFT 16
+#define TMRN_TMCFG0_NATHRD 0x00003f00 /* Number of active threads */
+#define TMRN_TMCFG0_NATHRD_SHIFT 8
+#define TMRN_TMCFG0_NTHRD 0x0000003f /* Number of threads */
+#define TMRN_IMSR0 0x120 /* Initial MSR Register 0 (e6500) */
+#define TMRN_IMSR1 0x121 /* Initial MSR Register 1 (e6500) */
+#define TMRN_INIA0 0x140 /* Next Instruction Address Register 0 */
+#define TMRN_INIA1 0x141 /* Next Instruction Address Register 1 */
+#define SPRN_TENSR 0x1b5 /* Thread Enable Status Register */
+#define SPRN_TENS 0x1b6 /* Thread Enable Set Register */
+#define SPRN_TENC 0x1b7 /* Thread Enable Clear Register */
+
+#define TEN_THREAD(x) (1 << (x))
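+
+/*
+ * Illustrative use (not taken from kernel code): starting thread 1 and
+ * later stopping it again could look like
+ *
+ *	mtspr(SPRN_TENS, TEN_THREAD(1));	// enable thread 1
+ *	mtspr(SPRN_TENC, TEN_THREAD(1));	// disable thread 1
+ */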
+
+#ifndef __ASSEMBLY__
+#define mftmr(rn) ({unsigned long rval; \
+ asm volatile(MFTMR(rn, %0) : "=r" (rval)); rval;})
+#define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+
+extern unsigned long global_dbcr0[];
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
new file mode 100644
index 000000000..a21f529c4
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains register definitions for the Freescale Embedded Performance
+ * Monitor.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
+#define __ASM_POWERPC_REG_FSL_EMB_H__
+
+#include <linux/stringify.h>
+
+#ifndef __ASSEMBLY__
+/* Performance Monitor Registers */
+#define mfpmr(rn) ({unsigned int rval; \
+ asm volatile("mfpmr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
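+
+/*
+ * For example (an illustrative sketch only), freezing all counters and
+ * then reading performance monitor counter 0:
+ *
+ *	mtpmr(PMRN_PMGC0, PMGC0_FAC);
+ *	unsigned int count = mfpmr(PMRN_PMC0);
+ */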
+#endif /* __ASSEMBLY__ */
+
+/* Freescale Book E Performance Monitor APU Registers */
+#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
+#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
+#define PMRN_PMC2 0x012 /* Performance Monitor Counter 2 */
+#define PMRN_PMC3 0x013 /* Performance Monitor Counter 3 */
+#define PMRN_PMC4 0x014 /* Performance Monitor Counter 4 */
+#define PMRN_PMC5 0x015 /* Performance Monitor Counter 5 */
+#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
+#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
+#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
+#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
+#define PMRN_PMLCA4 0x094 /* PM Local Control A4 */
+#define PMRN_PMLCA5 0x095 /* PM Local Control A5 */
+
+#define PMLCA_FC 0x80000000 /* Freeze Counter */
+#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
+#define PMLCA_FCU 0x20000000 /* Freeze in User */
+#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
+#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
+#define PMLCA_CE 0x04000000 /* Condition Enable */
+#define PMLCA_FGCS1 0x00000002 /* Freeze in guest state */
+#define PMLCA_FGCS0 0x00000001 /* Freeze in hypervisor state */
+
+#define PMLCA_EVENT_MASK 0x01ff0000 /* Event field */
+#define PMLCA_EVENT_SHIFT 16
+
+#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
+#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
+#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
+#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
+#define PMRN_PMLCB4 0x114 /* PM Local Control B4 */
+#define PMRN_PMLCB5 0x115 /* PM Local Control B5 */
+
+#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */
+#define PMLCB_THRESHMUL_SHIFT 8
+
+#define PMLCB_THRESHOLD_MASK 0x003f /* Threshold Field */
+#define PMLCB_THRESHOLD_SHIFT 0
+
+#define PMRN_PMGC0 0x190 /* PM Global Control 0 */
+
+#define PMGC0_FAC 0x80000000 /* Freeze all Counters */
+#define PMGC0_PMIE 0x40000000 /* Interrupt Enable */
+#define PMGC0_FCECE 0x20000000 /* Freeze counters on
+ Enabled Condition or
+ Event */
+
+#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
+#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4 0x004 /* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5 0x005 /* User Performance Monitor Counter 5 */
+#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
+#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
+#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
+#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
+#define PMRN_UPMLCA4 0x084 /* User PM Local Control A4 */
+#define PMRN_UPMLCA5 0x085 /* User PM Local Control A5 */
+#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
+#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
+#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
+#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
+#define PMRN_UPMLCB4 0x104 /* User PM Local Control B4 */
+#define PMRN_UPMLCB5 0x105 /* User PM Local Control B5 */
+#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
+
+
+#endif /* __ASM_POWERPC_REG_FSL_EMB_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
new file mode 100644
index 000000000..8e83703d6
--- /dev/null
+++ b/arch/powerpc/include/asm/rheap.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-ppc/rheap.h
+ *
+ * Header file for the implementation of a remote heap.
+ *
+ * Author: Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_PPC_RHEAP_H__
+#define __ASM_PPC_RHEAP_H__
+
+#include <linux/list.h>
+
+typedef struct _rh_block {
+ struct list_head list;
+ unsigned long start;
+ int size;
+ const char *owner;
+} rh_block_t;
+
+typedef struct _rh_info {
+ unsigned int alignment;
+ int max_blocks;
+ int empty_slots;
+ rh_block_t *block;
+ struct list_head empty_list;
+ struct list_head free_list;
+ struct list_head taken_list;
+ unsigned int flags;
+} rh_info_t;
+
+#define RHIF_STATIC_INFO 0x1
+#define RHIF_STATIC_BLOCK 0x2
+
+typedef struct _rh_stats {
+ unsigned long start;
+ int size;
+ const char *owner;
+} rh_stats_t;
+
+#define RHGS_FREE 0
+#define RHGS_TAKEN 1
+
+/* Create a remote heap dynamically */
+extern rh_info_t *rh_create(unsigned int alignment);
+
+/* Destroy a remote heap, created by rh_create() */
+extern void rh_destroy(rh_info_t * info);
+
+/* Initialize in place a remote info block */
+extern void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
+ rh_block_t * block);
+
+/* Attach a free region to manage */
+extern int rh_attach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Detach a free region */
+extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Allocate the given size from the remote heap (with alignment) */
+extern unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment,
+ const char *owner);
+
+/* Allocate the given size from the remote heap */
+extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner);
+
+/* Allocate the given size from the given address */
+extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size,
+ const char *owner);
+
+/* Free the allocated area */
+extern int rh_free(rh_info_t * info, unsigned long start);
+
+/* Get stats for debugging purposes */
+extern int rh_get_stats(rh_info_t * info, int what, int max_stats,
+ rh_stats_t * stats);
+
+/* Simple dump of remote heap info */
+extern void rh_dump(rh_info_t * info);
+
+/* Simple dump of remote info block */
+void rh_dump_blk(rh_info_t *info, rh_block_t *blk);
+
+/* Set owner of taken block */
+extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);
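+
+/*
+ * Usage sketch (illustrative, not kernel code): managing a chunk of
+ * device-local memory might look like
+ *
+ *	rh_info_t *rh = rh_create(8);		// 8-byte alignment
+ *	rh_attach_region(rh, 0, 0x4000);	// offsets 0..16K
+ *	unsigned long off = rh_alloc(rh, 256, "my-driver");
+ *	...
+ *	rh_free(rh, off);
+ *	rh_destroy(rh);
+ *
+ * where the managed region is typically an offset range into on-chip
+ * RAM rather than a kernel virtual address range.
+ */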
+
+#endif /* __ASM_PPC_RHEAP_H__ */
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
new file mode 100644
index 000000000..0e57cda2a
--- /dev/null
+++ b/arch/powerpc/include/asm/rio.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+#ifdef CONFIG_FSL_RIO
+extern int fsl_rio_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; }
+#endif
+
+#endif /* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h
new file mode 100644
index 000000000..8df6235d6
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas-types.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_RTAS_TYPES_H
+#define _ASM_POWERPC_RTAS_TYPES_H
+
+#include <linux/spinlock_types.h>
+
+typedef __be32 rtas_arg_t;
+
+struct rtas_args {
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
+ rtas_arg_t args[16];
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ arch_spinlock_t lock;
+ struct rtas_args args;
+ struct device_node *dev; /* virtual address pointer */
+};
+
+struct rtas_error_log {
+ /* Byte 0 */
+ u8 byte0; /* Architectural version */
+
+ /* Byte 1 */
+ u8 byte1;
+ /* XXXXXXXX
+ * XXX 3: Severity level of error
+ * XX 2: Degree of recovery
+ * X 1: Extended log present?
+ * XX 2: Reserved
+ */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * XXXX 4: Initiator of event
+ * XXXX 4: Target of failed operation
+ */
+ u8 byte3; /* General event or error*/
+ __be32 extended_log_length; /* length in bytes */
+ unsigned char buffer[1]; /* Start of extended log */
+ /* Variable length. */
+};
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * from "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+ /* Byte 0 */
+ u8 byte0;
+ /* XXXXXXXX
+ * X 1: Log valid
+ * X 1: Unrecoverable error
+ * X 1: Recoverable (correctable or successfully retried)
+ * X 1: Bypassed unrecoverable error (degraded operation)
+ * X 1: Predictive error
+ * X 1: "New" log (always 1 for data returned from RTAS)
+ * X 1: Big Endian
+ * X 1: Reserved
+ */
+
+ /* Byte 1 */
+ u8 byte1; /* reserved */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * X 1: Set to 1 (indicating log is in PowerPC format)
+ * XXX 3: Reserved
+ * XXXX 4: Log format used for bytes 12-2047
+ */
+
+ /* Byte 3 */
+ u8 byte3; /* reserved */
+ /* Byte 4-11 */
+ u8 reserved[8]; /* reserved */
+ /* Byte 12-15 */
+ __be32 company_id; /* Company ID of the company */
+ /* that defines the format for */
+ /* the vendor specific log type */
+ /* Byte 16-end of log */
+ u8 vendor_log[1]; /* Start of vendor specific log */
+ /* Variable length. */
+};
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+ __be16 id; /* 0x00 2-byte ASCII section ID */
+ __be16 length; /* 0x02 Section length in bytes */
+ u8 version; /* 0x04 Section version */
+ u8 subtype; /* 0x05 Section subtype */
+ __be16 creator_component; /* 0x06 Creator component ID */
+ u8 data[]; /* 0x08 Start of section data */
+};
+
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+ u8 resource;
+ u8 action;
+ u8 id_type;
+ u8 reserved;
+ union {
+ __be32 drc_index;
+ __be32 drc_count;
+ struct { __be32 count, index; } ic;
+ char drc_name[1];
+ } _drc_u;
+};
+
+#endif /* _ASM_POWERPC_RTAS_TYPES_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
new file mode 100644
index 000000000..56319aea6
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas.h
@@ -0,0 +1,382 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _POWERPC_RTAS_H
+#define _POWERPC_RTAS_H
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+#include <asm/page.h>
+#include <asm/rtas-types.h>
+#include <linux/time.h>
+#include <linux/cpumask.h>
+
+/*
+ * Definitions for talking to the RTAS on CHRP machines.
+ *
+ * Copyright (C) 2001 Peter Bergner
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ */
+
+#define RTAS_UNKNOWN_SERVICE (-1)
+#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
+
+/* Memory set aside for sys_rtas to use with calls that need a work area. */
+#define RTAS_USER_REGION_SIZE (64 * 1024)
+
+/* RTAS return status codes */
+#define RTAS_BUSY -2 /* RTAS Busy */
+#define RTAS_EXTENDED_DELAY_MIN 9900
+#define RTAS_EXTENDED_DELAY_MAX 9905
+
+/* statuses specific to ibm,suspend-me */
+#define RTAS_SUSPEND_ABORTED 9000 /* Suspension aborted */
+#define RTAS_NOT_SUSPENDABLE -9004 /* Partition not suspendable */
+#define RTAS_THREADS_ACTIVE -9005 /* Multiple processor threads active */
+#define RTAS_OUTSTANDING_COPROC -9006 /* Outstanding coprocessor operations */
+
+/*
+ * In general to call RTAS use rtas_token("string") to lookup
+ * an RTAS token for the given string (e.g. "event-scan").
+ * To actually perform the call use
+ * ret = rtas_call(token, n_in, n_out, ...)
+ * where n_in is the number of input parameters and
+ * n_out is the number of output parameters.
+ *
+ * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE
+ * will be returned as a token.  rtas_call() checks for this token
+ * and errors out gracefully, so rtas_call(rtas_token("str"), ...)
+ * may be safely used for one-shot calls to RTAS.
+ */
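+
+/*
+ * For instance, a one-shot event scan might look like this (an
+ * illustrative sketch, not a canonical call site):
+ *
+ *	int token = rtas_token("event-scan");
+ *	int ret = rtas_call(token, 4, 1, NULL,
+ *			    RTAS_EVENT_SCAN_ALL_EVENTS, 0,
+ *			    __pa(log_buf), RTAS_ERROR_LOG_MAX);
+ *
+ * where "log_buf" stands in for a caller-owned buffer below 4GB.
+ */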
+
+/* RTAS event classes */
+#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
+#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
+#define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */
+#define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff
+
+/* RTAS event severity */
+#define RTAS_SEVERITY_FATAL 0x5
+#define RTAS_SEVERITY_ERROR 0x4
+#define RTAS_SEVERITY_ERROR_SYNC 0x3
+#define RTAS_SEVERITY_WARNING 0x2
+#define RTAS_SEVERITY_EVENT 0x1
+#define RTAS_SEVERITY_NO_ERROR 0x0
+
+/* RTAS event disposition */
+#define RTAS_DISP_FULLY_RECOVERED 0x0
+#define RTAS_DISP_LIMITED_RECOVERY 0x1
+#define RTAS_DISP_NOT_RECOVERED 0x2
+
+/* RTAS event initiator */
+#define RTAS_INITIATOR_UNKNOWN 0x0
+#define RTAS_INITIATOR_CPU 0x1
+#define RTAS_INITIATOR_PCI 0x2
+#define RTAS_INITIATOR_ISA 0x3
+#define RTAS_INITIATOR_MEMORY 0x4
+#define RTAS_INITIATOR_POWERMGM 0x5
+
+/* RTAS event target */
+#define RTAS_TARGET_UNKNOWN 0x0
+#define RTAS_TARGET_CPU 0x1
+#define RTAS_TARGET_PCI 0x2
+#define RTAS_TARGET_ISA 0x3
+#define RTAS_TARGET_MEMORY 0x4
+#define RTAS_TARGET_POWERMGM 0x5
+
+/* RTAS event type */
+#define RTAS_TYPE_RETRY 0x01
+#define RTAS_TYPE_TCE_ERR 0x02
+#define RTAS_TYPE_INTERN_DEV_FAIL 0x03
+#define RTAS_TYPE_TIMEOUT 0x04
+#define RTAS_TYPE_DATA_PARITY 0x05
+#define RTAS_TYPE_ADDR_PARITY 0x06
+#define RTAS_TYPE_CACHE_PARITY 0x07
+#define RTAS_TYPE_ADDR_INVALID 0x08
+#define RTAS_TYPE_ECC_UNCORR 0x09
+#define RTAS_TYPE_ECC_CORR 0x0a
+#define RTAS_TYPE_EPOW 0x40
+#define RTAS_TYPE_PLATFORM 0xE0
+#define RTAS_TYPE_IO 0xE1
+#define RTAS_TYPE_INFO 0xE2
+#define RTAS_TYPE_DEALLOC 0xE3
+#define RTAS_TYPE_DUMP 0xE4
+#define RTAS_TYPE_HOTPLUG 0xE5
+/* I don't add PowerMGM events right now; this is a different topic */
+#define RTAS_TYPE_PMGM_POWER_SW_ON 0x60
+#define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61
+#define RTAS_TYPE_PMGM_LID_OPEN 0x62
+#define RTAS_TYPE_PMGM_LID_CLOSE 0x63
+#define RTAS_TYPE_PMGM_SLEEP_BTN 0x64
+#define RTAS_TYPE_PMGM_WAKE_BTN 0x65
+#define RTAS_TYPE_PMGM_BATTERY_WARN 0x66
+#define RTAS_TYPE_PMGM_BATTERY_CRIT 0x67
+#define RTAS_TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define RTAS_TYPE_PMGM_SWITCH_TO_AC 0x69
+#define RTAS_TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define RTAS_TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define RTAS_TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define RTAS_TYPE_PMGM_RING_INDICATE 0x6d
+#define RTAS_TYPE_PMGM_LAN_ATTENTION 0x6e
+#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
+#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
+#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+/* Platform Resource Reassignment Notification */
+#define RTAS_TYPE_PRRN 0xA0
+
+/* RTAS check-exception vector offset */
+#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
+
+static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0xE0) >> 5;
+}
+
+static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0x18) >> 3;
+}
+
+static inline
+void rtas_set_disposition_recovered(struct rtas_error_log *elog)
+{
+ elog->byte1 &= ~0x18;
+ elog->byte1 |= (RTAS_DISP_FULLY_RECOVERED << 3);
+}
+
+static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0x04) >> 2;
+}
+
+static inline uint8_t rtas_error_initiator(const struct rtas_error_log *elog)
+{
+ return (elog->byte2 & 0xf0) >> 4;
+}
+
+#define rtas_error_type(x) ((x)->byte3)
+
+static inline
+uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
+{
+ return be32_to_cpu(elog->extended_log_length);
+}
+
+#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14
+
+#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
+
+static
+inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
+{
+ return ext_log->byte2 & 0x0F;
+}
+
+static
+inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
+{
+ return be32_to_cpu(ext_log->company_id);
+}
+
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C')
+
+static
+inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
+{
+ return be16_to_cpu(sect->id);
+}
+
+static
+inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
+{
+ return be16_to_cpu(sect->length);
+}
+
+#define PSERIES_HP_ELOG_RESOURCE_CPU 1
+#define PSERIES_HP_ELOG_RESOURCE_MEM 2
+#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
+#define PSERIES_HP_ELOG_RESOURCE_PHB 4
+#define PSERIES_HP_ELOG_RESOURCE_PMEM 6
+
+#define PSERIES_HP_ELOG_ACTION_ADD 1
+#define PSERIES_HP_ELOG_ACTION_REMOVE 2
+
+#define PSERIES_HP_ELOG_ID_DRC_NAME 1
+#define PSERIES_HP_ELOG_ID_DRC_INDEX 2
+#define PSERIES_HP_ELOG_ID_DRC_COUNT 3
+#define PSERIES_HP_ELOG_ID_DRC_IC 4
+
+struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ uint16_t section_id);
+
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
+ */
+extern void (*rtas_flash_term_hook)(int);
+
+extern struct rtas_t rtas;
+
+extern int rtas_token(const char *service);
+extern int rtas_service_present(const char *service);
+extern int rtas_call(int token, int, int, int *, ...);
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
+ int nret, ...);
+extern void __noreturn rtas_restart(char *cmd);
+extern void rtas_power_off(void);
+extern void __noreturn rtas_halt(void);
+extern void rtas_os_term(char *str);
+void rtas_activate_firmware(void);
+extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+extern int rtas_get_power_level(int powerdomain, int *level);
+extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern bool rtas_indicator_present(int token, int *maxindex);
+extern int rtas_set_indicator(int indicator, int index, int new_value);
+extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
+extern void rtas_progress(char *s, unsigned short hex);
+int rtas_ibm_suspend_me(int *fw_status);
+
+struct rtc_time;
+extern time64_t rtas_get_boot_time(void);
+extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
+extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
+
+extern unsigned int rtas_busy_delay_time(int status);
+bool rtas_busy_delay(int status);
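+
+/*
+ * The canonical retry pattern for busy/extended-delay statuses
+ * (illustrative; 'token' would come from rtas_token()):
+ *
+ *	int rc;
+ *
+ *	do {
+ *		rc = rtas_call(token, 0, 1, NULL);
+ *	} while (rtas_busy_delay(rc));
+ */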
+
+extern int early_init_dt_scan_rtas(unsigned long node,
+ const char *uname, int depth, void *data);
+
+extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+
+#ifdef CONFIG_PPC_PSERIES
+extern time64_t last_rtas_event;
+extern int clobbering_unread_rtas_event(void);
+extern void post_mobility_fixup(void);
+int rtas_syscall_dispatch_ibm_suspend_me(u64 handle);
+#else
+static inline int clobbering_unread_rtas_event(void) { return 0; }
+static inline int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_PPC_RTAS_DAEMON
+extern void rtas_cancel_event_scan(void);
+#else
+static inline void rtas_cancel_event_scan(void) { }
+#endif
+
+/* Error types logged. */
+#define ERR_FLAG_ALREADY_LOGGED 0x0
+#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
+#define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */
+#define ERR_TYPE_KERNEL_PANIC 0x4 /* from die()/panic() */
+#define ERR_TYPE_KERNEL_PANIC_GZ 0x8 /* ditto, compressed */
+
+/* All the types and not flags */
+#define ERR_TYPE_MASK \
+ (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC | ERR_TYPE_KERNEL_PANIC_GZ)
+
+#define RTAS_DEBUG KERN_DEBUG "RTAS: "
+
+#define RTAS_ERROR_LOG_MAX 2048
+
+/*
+ * Return the firmware-specified size of the error log buffer
+ * for all rtas calls that require an error buffer argument.
+ * This includes 'check-exception' and 'rtas-last-error'.
+ */
+extern int rtas_get_error_log_max(void);
+
+/* Event Scan Parameters */
+#define EVENT_SCAN_ALL_EVENTS 0xf0000000
+#define SURVEILLANCE_TOKEN 9000
+#define LOG_NUMBER 64 /* must be a power of two */
+#define LOG_NUMBER_MASK (LOG_NUMBER-1)
+
+/* Some RTAS ops require a data buffer, and that buffer must be below 4GB.
+ * Rather than having a memory allocator, just use this buffer: take the
+ * lock first, make the RTAS call, then copy the data out rather than
+ * holding on to the buffer.
+ */
+
+#define RTAS_DATA_BUF_SIZE 4096
+extern spinlock_t rtas_data_buf_lock;
+extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
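+
+/*
+ * Sketch of the intended usage per the comment above (illustrative;
+ * assumes 'len' fits in the buffer and a suitable 'token'):
+ *
+ *	spin_lock(&rtas_data_buf_lock);
+ *	memcpy(rtas_data_buf, src, len);
+ *	rc = rtas_call(token, 2, 1, NULL, __pa(rtas_data_buf), len);
+ *	memcpy(dst, rtas_data_buf, len);
+ *	spin_unlock(&rtas_data_buf_lock);
+ */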
+
+/* RMO buffer reserved for user-space RTAS use */
+extern unsigned long rtas_rmo_buf;
+
+#define GLOBAL_INTERRUPT_QUEUE 9005
+
+/**
+ * rtas_config_addr - Format a busno, devfn and reg for RTAS.
+ * @busno: The bus number.
+ * @devfn: The device and function number as encoded by PCI_DEVFN().
+ * @reg: The register number.
+ *
+ * This function encodes the given busno, devfn and register number as
+ * required for RTAS calls that take a "config_addr" parameter.
+ * See PAPR requirement 7.3.4-1 for more info.
+ */
+static inline u32 rtas_config_addr(int busno, int devfn, int reg)
+{
+ return ((reg & 0xf00) << 20) | ((busno & 0xff) << 16) |
+ (devfn << 8) | (reg & 0xff);
+}
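+
+/*
+ * Worked example: busno 0x01, devfn PCI_DEVFN(2, 0) (= 0x10) and
+ * extended config register 0x104 encode as
+ *	((0x104 & 0xf00) << 20) | (0x01 << 16) | (0x10 << 8) | (0x104 & 0xff)
+ *	= 0x10000000 | 0x00010000 | 0x00001000 | 0x04
+ *	= 0x10011004
+ */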
+
+extern void rtas_give_timebase(void);
+extern void rtas_take_timebase(void);
+
+#ifdef CONFIG_PPC_RTAS
+static inline int page_is_rtas_user_buf(unsigned long pfn)
+{
+ unsigned long paddr = (pfn << PAGE_SHIFT);
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE))
+ return 1;
+ return 0;
+}
+
+/* Not the best place to put pSeries_coalesce_init; this will be fixed when
+ * we move some of the RTAS suspend-me code to pseries. */
+extern void pSeries_coalesce_init(void);
+void rtas_initialize(void);
+#else
+static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
+static inline void pSeries_coalesce_init(void) { }
+static inline void rtas_initialize(void) { }
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+
+#ifdef CONFIG_HV_PERF_CTRS
+void read_24x7_sys_info(void);
+#else
+static inline void read_24x7_sys_info(void) { }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
new file mode 100644
index 000000000..ceb66d761
--- /dev/null
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_RUNLATCH_H
+#define _ASM_POWERPC_RUNLATCH_H
+
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We hard enable/disable interrupts manually here; this is called
+ * from the idle loop and we don't want to interfere with
+ * soft-disable/enable and interrupt replay.
+ */
+#define ppc64_runlatch_off() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ test_thread_local_flags(_TLF_RUNLATCH)) { \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_off(); \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+
+#define ppc64_runlatch_on() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ !test_thread_local_flags(_TLF_RUNLATCH)) { \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_on(); \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
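+
+/*
+ * Typical idle-loop usage (a sketch; mirrors how arch idle code
+ * brackets its wait):
+ *
+ *	ppc64_runlatch_off();
+ *	... wait for an interrupt / enter a low-power state ...
+ *	ppc64_runlatch_on();
+ */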
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_RUNLATCH_H */
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
new file mode 100644
index 000000000..ac2033f13
--- /dev/null
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#include <asm-generic/seccomp.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define __SECCOMP_ARCH_LE __AUDIT_ARCH_LE
+#define __SECCOMP_ARCH_LE_NAME "le"
+#else
+#define __SECCOMP_ARCH_LE 0
+#define __SECCOMP_ARCH_LE_NAME
+#endif
+
+#ifdef CONFIG_PPC64
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC64 | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc64" __SECCOMP_ARCH_LE_NAME
+# ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+# endif
+#else /* !CONFIG_PPC64 */
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+#endif
+
+#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
new file mode 100644
index 000000000..9c00c9c0c
--- /dev/null
+++ b/arch/powerpc/include/asm/sections.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
+#ifdef __KERNEL__
+
+#include <linux/elf.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+typedef struct func_desc func_desc_t;
+#endif
+
+#include <asm-generic/sections.h>
+
+extern char __head_end[];
+extern char __srwx_boundary[];
+
+/* Patch sites */
+extern s32 patch__call_flush_branch_caches1;
+extern s32 patch__call_flush_branch_caches2;
+extern s32 patch__call_flush_branch_caches3;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_branch_caches;
+extern long kvm_flush_link_stack;
+
+#ifdef __powerpc64__
+
+extern char __start_interrupts[];
+extern char __end_interrupts[];
+
+#ifdef CONFIG_PPC_POWERNV
+extern char start_real_trampolines[];
+extern char end_real_trampolines[];
+extern char start_virt_trampolines[];
+extern char end_virt_trampolines[];
+#endif
+
+/*
+ * This assumes the kernel is never compiled with -mcmodel=small, or
+ * that the total .toc is always less than 64k.
+ */
+static inline unsigned long kernel_toc_addr(void)
+{
+ unsigned long toc_ptr;
+
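+	/* On ppc64, GPR2 ("r2") holds the kernel TOC pointer */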
+ asm volatile("mr %0, 2" : "=r" (toc_ptr));
+ return toc_ptr;
+}
+
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
+static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
+{
+ return start < (unsigned long)__init_end &&
+ (unsigned long)_stext < end;
+}
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/arch/powerpc/include/asm/secure_boot.h b/arch/powerpc/include/asm/secure_boot.h
new file mode 100644
index 000000000..a2ff55691
--- /dev/null
+++ b/arch/powerpc/include/asm/secure_boot.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secure boot definitions
+ *
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+#ifndef _ASM_POWER_SECURE_BOOT_H
+#define _ASM_POWER_SECURE_BOOT_H
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+bool is_ppc_secureboot_enabled(void);
+bool is_ppc_trustedboot_enabled(void);
+
+#else
+
+static inline bool is_ppc_secureboot_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_ppc_trustedboot_enabled(void)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
new file mode 100644
index 000000000..27574f218
--- /dev/null
+++ b/arch/powerpc/include/asm/security_features.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Security related feature bit definitions.
+ *
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
+#define _ASM_POWERPC_SECURITY_FEATURES_H
+
+
+extern u64 powerpc_security_features;
+extern bool rfi_flush;
+
+/* These are bit flags */
+enum stf_barrier_type {
+ STF_BARRIER_NONE = 0x1,
+ STF_BARRIER_FALLBACK = 0x2,
+ STF_BARRIER_EIEIO = 0x4,
+ STF_BARRIER_SYNC_ORI = 0x8,
+};
+
+void setup_stf_barrier(void);
+void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
+
+static inline void security_ftr_set(u64 feature)
+{
+ powerpc_security_features |= feature;
+}
+
+static inline void security_ftr_clear(u64 feature)
+{
+ powerpc_security_features &= ~feature;
+}
+
+static inline bool security_ftr_enabled(u64 feature)
+{
+ return !!(powerpc_security_features & feature);
+}
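+
+/*
+ * Illustrative consumer (a sketch; enable_mitigation() is hypothetical):
+ * setup code typically gates a mitigation on the "favour security" bit
+ * plus the specific need bit.
+ *
+ *	if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ *	    security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
+ *		enable_mitigation();
+ */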
+
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
+
+// Features indicating support for Spectre/Meltdown mitigations
+
+// The L1-D cache can be flushed with ori r30,r30,0
+#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
+
+// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
+#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
+
+// ori r31,r31,0 acts as a speculation barrier
+#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
+
+// Speculation past bctr is disabled
+#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
+
+// Entries in L1-D are private to a SMT thread
+#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
+
+// Indirect branch prediction cache disabled
+#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
+
+// bcctr 2,0,0 triggers a hardware assisted link stack flush
+#define SEC_FTR_BCCTR_LINK_FLUSH_ASSIST 0x0000000000002000ull
+
+// Features indicating need for Spectre/Meltdown mitigations
+
+// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
+#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
+
+// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
+#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
+
+// A speculation barrier should be used for bounds checks (Spectre variant 1)
+#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
+
+// Firmware configuration indicates user favours security over performance
+#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+
+// The L1-D cache should be flushed when entering the kernel
+#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
+
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
+
+// Features enabled by default
+#define SEC_FTR_DEFAULT \
+ (SEC_FTR_L1D_FLUSH_HV | \
+ SEC_FTR_L1D_FLUSH_PR | \
+ SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_L1D_FLUSH_ENTRY | \
+ SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
+ SEC_FTR_FAVOUR_SECURITY)
+
+#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/secvar.h b/arch/powerpc/include/asm/secvar.h
new file mode 100644
index 000000000..4cc35b58b
--- /dev/null
+++ b/arch/powerpc/include/asm/secvar.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * PowerPC secure variable operations.
+ */
+#ifndef SECVAR_OPS_H
+#define SECVAR_OPS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+extern const struct secvar_operations *secvar_ops;
+
+struct secvar_operations {
+ int (*get)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+ int (*get_next)(const char *key, uint64_t *key_len,
+ uint64_t keybufsize);
+ int (*set)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+};
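+
+/*
+ * Illustrative consumer (a sketch; the key-length convention and the
+ * buffer size here are assumptions, not part of this API contract):
+ *
+ *	u8 buf[1024];
+ *	u64 size = sizeof(buf);
+ *
+ *	if (secvar_ops && !secvar_ops->get("db", 3, buf, &size))
+ *		pr_info("read %llu bytes of db\n", size);
+ */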
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+extern void set_secvar_ops(const struct secvar_operations *ops);
+
+#else
+
+static inline void set_secvar_ops(const struct secvar_operations *ops) { }
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/serial.h b/arch/powerpc/include/asm/serial.h
new file mode 100644
index 000000000..cd6c18d0e
--- /dev/null
+++ b/arch/powerpc/include/asm/serial.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ */
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
+
+/* Default baud base if not found in device-tree */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#ifdef CONFIG_PPC_UDBG_16550
+extern void find_legacy_serial_ports(void);
+#else
+#define find_legacy_serial_ports() do { } while (0)
+#endif
+
+#endif /* _ASM_POWERPC_SERIAL_H */
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000..7ebc807aa
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+#define SET_MEMORY_NP 4 /* Set memory non present */
+#define SET_MEMORY_P 5 /* Set memory present */
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+static inline int set_memory_np(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NP);
+}
+
+static inline int set_memory_p(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_P);
+}
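+
+/*
+ * Illustrative usage (a sketch; assumes 'addr' is a page-aligned kernel
+ * virtual address):
+ *
+ *	if (set_memory_ro(addr, 1))
+ *		pr_warn("failed to write-protect page at %lx\n", addr);
+ */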
+
+#endif
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
new file mode 100644
index 000000000..f798e80e4
--- /dev/null
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright © 2008 Michael Neuling IBM Corporation
+ */
+#ifndef _ASM_POWERPC_SETJMP_H
+#define _ASM_POWERPC_SETJMP_H
+
+#define JMP_BUF_LEN 23
+
+typedef long jmp_buf[JMP_BUF_LEN];
+
+extern int setjmp(jmp_buf env) __attribute__((returns_twice));
+extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
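+
+/*
+ * The usual save/recover shape (a sketch; in-kernel users are special
+ * paths such as crash handling, and 'env' must outlive the longjmp):
+ *
+ *	static jmp_buf env;
+ *
+ *	if (setjmp(env) == 0)
+ *		risky_operation();	(may longjmp(env, 1) on failure)
+ *	else
+ *		recover();
+ */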
+
+#endif /* _ASM_POWERPC_SETJMP_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
new file mode 100644
index 000000000..e29e83f8a
--- /dev/null
+++ b/arch/powerpc/include/asm/setup.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SETUP_H
+#define _ASM_POWERPC_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+
+extern unsigned long long memory_limit;
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+struct device_node;
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+void check_for_initrd(void);
+void mem_topology_setup(void);
+void initmem_init(void);
+void setup_panic(void);
+#define ARCH_PANIC_TIMEOUT 180
+
+#ifdef CONFIG_PPC_PSERIES
+extern bool pseries_reloc_on_exception(void);
+extern bool pseries_enable_reloc_on_exc(void);
+extern void pseries_disable_reloc_on_exc(void);
+extern void pseries_big_endian_exceptions(void);
+void __init pseries_little_endian_exceptions(void);
+#else
+static inline bool pseries_reloc_on_exception(void) { return false; }
+static inline bool pseries_enable_reloc_on_exc(void) { return false; }
+static inline void pseries_disable_reloc_on_exc(void) {}
+static inline void pseries_big_endian_exceptions(void) {}
+static inline void pseries_little_endian_exceptions(void) {}
+#endif /* CONFIG_PPC_PSERIES */
+
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+ L1D_FLUSH_NONE = 0x1,
+ L1D_FLUSH_FALLBACK = 0x2,
+ L1D_FLUSH_ORI = 0x4,
+ L1D_FLUSH_MTTRIG = 0x8,
+};
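+
+/*
+ * These are OR-able; an illustrative platform call (the values are an
+ * example, not a recommendation) might look like:
+ *
+ *	setup_rfi_flush(L1D_FLUSH_FALLBACK | L1D_FLUSH_ORI, true);
+ */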
+
+void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void setup_entry_flush(bool enable);
+void setup_uaccess_flush(bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void __init setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { }
+#endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
+void do_entry_flush_fixups(enum l1d_flush_type types);
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
+#endif
+
+#ifdef CONFIG_PPC_E500
+void __init setup_spectre_v2(void);
+#else
+static inline void setup_spectre_v2(void) {}
+#endif
+void __init do_btb_flush_fixups(void);
+
+#ifdef CONFIG_PPC32
+unsigned long __init early_init(unsigned long dt_ptr);
+void __init machine_init(u64 dt_ptr);
+#endif
+void __init early_setup(unsigned long dt_ptr);
+void early_setup_secondary(void);
+
+/* prom_init (OpenFirmware) */
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+ unsigned long pp, unsigned long r6,
+ unsigned long r7, unsigned long kbase);
+
+extern struct seq_buf ppc_hw_desc;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_SETUP_H */
+
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
new file mode 100644
index 000000000..8b957aabb
--- /dev/null
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -0,0 +1,343 @@
+/* Machine-dependent software floating-point definitions. PPC version.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ Actually, this is a PPC (32bit) version, written based on the
+ i386, sparc, and sparc64 versions, by me,
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+ Comments are by and large also mine, although they may be inaccurate.
+
+ In picking out asm fragments I've gone with the lowest common
+ denominator, which also happens to be the hardware I have :->
+ That is, a SPARC without hardware multiply and divide.
+ */
+
+/* basic word size definitions */
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned int
+#define _FP_WS_TYPE signed int
+#define _FP_I_TYPE int
+
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* You can optionally code some things like addition in asm. For
+ * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't
+ * then you get a fragment of C code [if you change an #ifdef 0
+ * in op-2.h] or a call to add_ssaaaa (see below).
+ * Good places to look for asm fragments to use are gcc and glibc.
+ * gcc's longlong.h is useful.
+ */
+
+/* We need to know how to multiply and divide. If the host word size
+ * is >= 2*fracbits you can use FP_MUL_MEAT_n_imm(t,R,X,Y) which
+ * codes the multiply with whatever gcc does to 'a * b'.
+ * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm
+ * function that can multiply two 1W values and get a 2W result.
+ * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which
+ * does bitshifting to avoid overflow.
+ * For division there is FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size
+ * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or
+ * _FP_DIV_HELP_ldiv (see op-1.h).
+ * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W).
+ * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd
+ * to do this.]
+ * In general, 'n' is the number of words required to hold the type,
+ * and 't' is either S, D or Q for single/double/quad.
+ * -- PMM
+ */
+/* Example: SPARC64:
+ * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_imm(S,R,X,Y)
+ * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
+ * #define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+ * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
+ * #define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv_64(Q,R,X,Y)
+ *
+ * Example: i386:
+ * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64)
+ * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32)
+ * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
+ */
+
+#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+/* These macros define what a NaN looks like. They're supposed to expand to
+ * a comma-separated set of 32-bit unsigned ints that encode NaN.
+ */
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+#ifdef FP_EX_BOOKE_E500_SPE
+#define FP_EX_INEXACT (1 << 21)
+#define FP_EX_INVALID (1 << 20)
+#define FP_EX_DIVZERO (1 << 19)
+#define FP_EX_UNDERFLOW (1 << 18)
+#define FP_EX_OVERFLOW (1 << 17)
+#define FP_INHIBIT_RESULTS 0
+
+#define __FPU_FPSCR (current->thread.spefscr)
+#define __FPU_ENABLED_EXC \
+({ \
+ (__FPU_FPSCR >> 2) & 0x1f; \
+})
+#else
+/* Exception flags. We use the bit positions of the appropriate bits
+ in the FPSCR, which also correspond to the FE_* bits. This makes
+ everything easier ;-). */
+#define FP_EX_INVALID (1 << (31 - 2))
+#define FP_EX_INVALID_SNAN EFLAG_VXSNAN
+#define FP_EX_INVALID_ISI EFLAG_VXISI
+#define FP_EX_INVALID_IDI EFLAG_VXIDI
+#define FP_EX_INVALID_ZDZ EFLAG_VXZDZ
+#define FP_EX_INVALID_IMZ EFLAG_VXIMZ
+#define FP_EX_OVERFLOW (1 << (31 - 3))
+#define FP_EX_UNDERFLOW (1 << (31 - 4))
+#define FP_EX_DIVZERO (1 << (31 - 5))
+#define FP_EX_INEXACT (1 << (31 - 6))
+
+#define __FPU_FPSCR (current->thread.fp_state.fpscr)
+
+/* We only actually write to the destination register
+ * if exceptions signalled (if any) will not trap.
+ */
+#define __FPU_ENABLED_EXC \
+({ \
+ (__FPU_FPSCR >> 3) & 0x1f; \
+})
+
+#endif
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose X.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ else \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#define __FPU_TRAP_P(bits) \
+ ((__FPU_ENABLED_EXC & (bits)) != 0)
+
+#define __FP_PACK_S(val,X) \
+({ int __exc = _FP_PACK_CANONICAL(S,1,X); \
+ if(!__exc || !__FPU_TRAP_P(__exc)) \
+ _FP_PACK_RAW_1_P(S,val,X); \
+ __exc; \
+})
+
+#define __FP_PACK_D(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D, 2, X); \
+ if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
+ _FP_PACK_RAW_2_P(D, val, X); \
+ } while (0)
+
+#define __FP_PACK_DS(val,X) \
+ do { \
+ FP_DECL_S(__X); \
+ FP_CONV(S, D, 1, 2, __X, X); \
+ _FP_PACK_CANONICAL(S, 1, __X); \
+ if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) { \
+ _FP_UNPACK_CANONICAL(S, 1, __X); \
+ FP_CONV(D, S, 2, 1, X, __X); \
+ _FP_PACK_CANONICAL(D, 2, X); \
+ if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
+ _FP_PACK_RAW_2_P(D, val, X); \
+ } \
+ } while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE \
+({ \
+ __FPU_FPSCR & 0x3; \
+})
+
+/* the asm fragments go here: all these are taken from glibc-2.0.5's
+ * stdlib/longlong.h
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* add_ssaaaa is used in op-2.h and should be equivalent to
+ * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
+ * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ * high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ * (i.e. carry out) is not stored anywhere, and is lost.
+ */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+
+/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
+ * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
+ * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ * LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
+ * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ * and is lost.
+ */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else \
+ __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } while (0)
+
+/* asm fragments for mul and div */
+
+/* umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two-UWtype
+ * word product in HIGH_PROD and LOW_PROD.
+ */
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+	__asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
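+
+/*
+ * Worked example: with __m0 = __m1 = 0x10000 the full product is
+ * 0x100000000, so mulhwu leaves ph = 0x1 and the plain 32-bit
+ * multiply leaves pl = 0x0.
+ */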
+
+/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator) divides a UDWtype, composed by the UWtype integers
+ * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ * than DENOMINATOR for correct operation. If, in addition, the most
+ * significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+ * UDIV_NEEDS_NORMALIZATION is defined to 1.
+ */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ UWtype __d1, __d0, __q1, __q0; \
+ UWtype __r1, __r0, __m; \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (UWtype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (UWtype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (UWtype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define abort() \
+ return 0
+
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+/* Exception flags. */
+#define EFLAG_INVALID (1 << (31 - 2))
+#define EFLAG_OVERFLOW (1 << (31 - 3))
+#define EFLAG_UNDERFLOW (1 << (31 - 4))
+#define EFLAG_DIVZERO (1 << (31 - 5))
+#define EFLAG_INEXACT (1 << (31 - 6))
+
+#define EFLAG_VXSNAN (1 << (31 - 7))
+#define EFLAG_VXISI (1 << (31 - 8))
+#define EFLAG_VXIDI (1 << (31 - 9))
+#define EFLAG_VXZDZ (1 << (31 - 10))
+#define EFLAG_VXIMZ (1 << (31 - 11))
+#define EFLAG_VXVC (1 << (31 - 12))
+#define EFLAG_VXSOFT (1 << (31 - 21))
+#define EFLAG_VXSQRT (1 << (31 - 22))
+#define EFLAG_VXCVI (1 << (31 - 23))
diff --git a/arch/powerpc/include/asm/shmparam.h b/arch/powerpc/include/asm/shmparam.h
new file mode 100644
index 000000000..bc0968839
--- /dev/null
+++ b/arch/powerpc/include/asm/shmparam.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SHMPARAM_H
+#define _ASM_POWERPC_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_POWERPC_SHMPARAM_H */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
new file mode 100644
index 000000000..922d43700
--- /dev/null
+++ b/arch/powerpc/include/asm/signal.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SIGNAL_H
+#define _ASM_POWERPC_SIGNAL_H
+
+#define __ARCH_HAS_SA_RESTORER
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+struct pt_regs;
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+
+unsigned long get_min_sigframe_size_32(void);
+unsigned long get_min_sigframe_size_64(void);
+unsigned long get_min_sigframe_size(void);
+unsigned long get_min_sigframe_size_compat(void);
+
+#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
new file mode 100644
index 000000000..9dcc7e999
--- /dev/null
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
+#define _ASM_POWERPC_SIMPLE_SPINLOCK_H
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ * Rework to support virtual processors
+ *
+ * An int is used, as a full 64-bit word is not necessary.
+ *
+ * (the type definitions are in asm/simple_spinlock_types.h)
+ */
+#include <linux/irqflags.h>
+#include <asm/paravirt.h>
+#include <asm/paca.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
+#define LOCK_TOKEN 1
+#endif
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ token = LOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2,%[eh]\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n\
+ stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:"
+ : "=&r" (tmp)
+ : "r" (token), "r" (&lock->slock), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ return __arch_spin_trylock(lock) == 0;
+}
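+
+/*
+ * Illustrative usage (a sketch; real callers go through the generic
+ * spinlock wrappers rather than these arch primitives):
+ *
+ *	if (arch_spin_trylock(&lock)) {
+ *		... critical section ...
+ *		arch_spin_unlock(&lock);
+ *	}
+ */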
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor. Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held. Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+#if defined(CONFIG_PPC_SPLPAR)
+/* We only yield to the hypervisor if we are in shared processor mode */
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
+#endif
+
+static inline void spin_yield(arch_spinlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ else
+ barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_rw_yield(lock);
+ else
+ barrier();
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (1) {
+ if (likely(__arch_spin_trylock(lock) == 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ } while (unlikely(lock->slock != 0));
+ HMT_medium();
+ }
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ __asm__ __volatile__("# arch_spin_unlock\n\t"
+ PPC_RELEASE_BARRIER: : :"memory");
+ lock->slock = 0;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND "extsw %0,%0\n"
+#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN (-1)
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
+{
+ long tmp;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1,%[eh]\n"
+ __DO_SIGN_EXTEND
+" addic. %0,%0,1\n\
+ ble- 2f\n"
+" stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
+ : "r" (&rw->lock), [eh] "n" (eh)
+ : "cr0", "xer", "memory");
+
+ return tmp;
+}
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
+{
+ long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ token = WRLOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2,%[eh]\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n"
+" stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
+ : "r" (token), "r" (&rw->lock), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__arch_read_trylock(rw) > 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
+ } while (unlikely(rw->lock < 0));
+ HMT_medium();
+ }
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__arch_write_trylock(rw) == 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
+ } while (unlikely(rw->lock != 0));
+ HMT_medium();
+ }
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ return __arch_read_trylock(rw) > 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ return __arch_write_trylock(rw) == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ long tmp;
+
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+"1: lwarx %0,0,%1\n\
+ addic %0,%0,-1\n"
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ __asm__ __volatile__("# write_unlock\n\t"
+ PPC_RELEASE_BARRIER: : :"memory");
+ rw->lock = 0;
+}
+
+#define arch_spin_relax(lock) spin_yield(lock)
+#define arch_read_relax(lock) rw_yield(lock)
+#define arch_write_relax(lock) rw_yield(lock)
+
+#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h
new file mode 100644
index 000000000..082433380
--- /dev/null
+++ b/arch/powerpc/include/asm/simple_spinlock_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile signed int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
new file mode 100644
index 000000000..6c6cb53d7
--- /dev/null
+++ b/arch/powerpc/include/asm/smp.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * smp.h: PowerPC-specific SMP code.
+ *
+ * Original was a copy of sparc smp.h. Now heavily modified
+ * for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ */
+
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
+#ifdef __KERNEL__
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+#include <asm/percpu.h>
+
+extern int boot_cpuid;
+extern int boot_cpu_hwid; /* PPC64 only */
+extern int spinning_secondaries;
+extern u32 *cpu_to_phys_id;
+extern bool coregroup_enabled;
+
+extern int cpu_to_chip_id(int cpu);
+extern int *chip_id_lookup_table;
+
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
+
+#ifdef CONFIG_SMP
+
+struct smp_ops_t {
+ void (*message_pass)(int cpu, int msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ void (*cause_ipi)(int cpu);
+#endif
+ int (*cause_nmi_ipi)(int cpu);
+ void (*probe)(void);
+ int (*kick_cpu)(int nr);
+ int (*prepare_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*bringup_done)(void);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+#ifdef CONFIG_HOTPLUG_CPU
+ void (*cpu_offline_self)(void);
+#endif
+};
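+
+/*
+ * Shape of a platform registration (illustrative only; the ops chosen
+ * here are an arbitrary mix of the generic helpers declared below):
+ *
+ *	static struct smp_ops_t myplat_smp_ops = {
+ *		.message_pass	= smp_muxed_ipi_message_pass,
+ *		.probe		= smp_mpic_probe,
+ *		.kick_cpu	= smp_generic_kick_cpu,
+ *		.setup_cpu	= smp_mpic_setup_cpu,
+ *	};
+ *
+ *	smp_ops = &myplat_smp_ops;
+ */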
+
+extern struct task_struct *secondary_current;
+
+void start_secondary(void *unused);
+extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern void smp_send_debugger_break(void);
+extern void start_secondary_resume(void);
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int generic_cpu_disable(void);
+void generic_cpu_die(unsigned int cpu);
+void generic_set_cpu_dead(unsigned int cpu);
+void generic_set_cpu_up(unsigned int cpu);
+int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i) do { } while (0)
+#endif
+
+#ifdef CONFIG_PPC64
+#define raw_smp_processor_id() (local_paca->paca_index)
+#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
+
+static inline int get_hard_smp_processor_id(int cpu)
+{
+ return smp_hw_index[cpu];
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+ smp_hw_index[cpu] = phys;
+}
+#endif
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
+static inline struct cpumask *cpu_l2_cache_mask(int cpu)
+{
+ return per_cpu(cpu_l2_cache_map, cpu);
+}
+
+static inline struct cpumask *cpu_smallcore_mask(int cpu)
+{
+ return per_cpu(cpu_smallcore_map, cpu);
+}
+
+extern int cpu_to_core_id(int cpu);
+
+extern bool has_big_cores;
+extern bool thread_group_shares_l2;
+extern bool thread_group_shares_l3;
+
+#define cpu_smt_mask cpu_smt_mask
+#ifdef CONFIG_SCHED_SMT
+static inline const struct cpumask *cpu_smt_mask(int cpu)
+{
+ if (has_big_cores)
+ return per_cpu(cpu_smallcore_map, cpu);
+
+ return per_cpu(cpu_sibling_map, cpu);
+}
+#endif /* CONFIG_SCHED_SMT */
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+#define PPC_MSG_TICK_BROADCAST 2
+#define PPC_MSG_NMI_IPI 3
+
+/* This is only used by the powernv kernel */
+#define PPC_MSG_RM_HOST_ACTION 4
+
+#define NMI_IPI_ALL_OTHERS -2
+
+#ifdef CONFIG_NMI_IPI
+extern int smp_handle_nmi_ipi(struct pt_regs *regs);
+#else
+static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
+#endif
+
+/* for irq controllers that have dedicated ipis per message (4) */
+extern int smp_request_message_ipi(int virq, int message);
+extern const char *smp_ipi_name[];
+
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_set_message(int cpu, int msg);
+extern irqreturn_t smp_ipi_demux(void);
+extern irqreturn_t smp_ipi_demux_relaxed(void);
+
+void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define hard_smp_processor_id() get_hard_smp_processor_id(0)
+#define smp_setup_cpu_maps()
+#define thread_group_shares_l2 0
+#define thread_group_shares_l3 0
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
+
+static inline const struct cpumask *cpu_smallcore_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
+
+static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64
+static inline int get_hard_smp_processor_id(int cpu)
+{
+ return paca_ptrs[cpu]->hw_cpu_id;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+ paca_ptrs[cpu]->hw_cpu_id = phys;
+}
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+extern int boot_cpuid_phys;
+static inline int get_hard_smp_processor_id(int cpu)
+{
+ return boot_cpuid_phys;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+ boot_cpuid_phys = phys;
+}
+#endif /* !CONFIG_SMP */
+#endif /* !CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
+extern void smp_release_cpus(void);
+#else
+static inline void smp_release_cpus(void) { }
+#endif
+
+extern int smt_enabled_at_boot;
+
+extern void smp_mpic_probe(void);
+extern void smp_mpic_setup_cpu(int cpu);
+extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
+
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+extern struct smp_ops_t *smp_ops;
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/* Definitions relative to the secondary CPU spin loop
+ * and entry point. Not all of them exist on both 32-bit and
+ * 64-bit, but defining them all here doesn't harm.
+ */
+extern void generic_secondary_smp_init(void);
+extern unsigned long __secondary_hold_spinloop;
+extern unsigned long __secondary_hold_acknowledge;
+extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
+
+extern void __early_start(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
new file mode 100644
index 000000000..2ac6ab903
--- /dev/null
+++ b/arch/powerpc/include/asm/smu.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SMU_H
+#define _SMU_H
+
+/*
+ * Definitions for talking to the SMU chip in newer G5 PowerMacs
+ */
+#ifdef __KERNEL__
+#include <linux/list.h>
+#endif
+#include <linux/types.h>
+
+/*
+ * Known SMU commands
+ *
+ * Most of what is below comes from looking at the Open Firmware driver,
+ * though this is still incomplete and could use better documentation here
+ * or there...
+ */
+
+
+/*
+ * Partition info commands
+ *
+ * These commands are used to retrieve the sdb-partition-XX data from
+ * the SMU. The length is always 2. First byte is the subcommand code
+ * and second byte is the partition ID.
+ *
+ * The reply is 6 bytes:
+ *
+ * - 0..1 : partition address
+ * - 2 : a byte containing the partition ID
+ * - 3 : length (maybe other bits are rest of header ?)
+ *
+ * The data must then be obtained with calls to another command:
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC (described below).
+ */
+#define SMU_CMD_PARTITION_COMMAND 0x3e
+#define SMU_CMD_PARTITION_LATEST 0x01
+#define SMU_CMD_PARTITION_BASE 0x02
+#define SMU_CMD_PARTITION_UPDATE 0x03
+
+
+/*
+ * Fan control
+ *
+ * This is a "mux" for fan control commands. The command seems to
+ * act differently based on the number of arguments. With 1 byte
+ * of argument, these seem to be queries for fan status, setpoint,
+ * etc., while with 0xe arguments, we set the fan speeds.
+ *
+ * Queries (1 byte arg):
+ * ---------------------
+ *
+ * arg=0x01: read RPM fans status
+ * arg=0x02: read RPM fans setpoint
+ * arg=0x11: read PWM fans status
+ * arg=0x12: read PWM fans setpoint
+ *
+ * the "status" queries return the current speed while the "setpoint" ones
+ * return the programmed/target speed. It _seems_ that the result is a bit
+ * mask in the first byte of active/available fans, followed by 6 words (16
+ * bits) containing the requested speed.
+ *
+ * Setpoint (14 bytes arg):
+ * ------------------------
+ *
+ * first arg byte is 0 for RPM fans and 0x10 for PWM. Second arg byte is the
+ * mask of fans affected by the command. Followed by 6 words containing the
+ * setpoint value for selected fans in the mask (or 0 if mask value is 0)
+ */
+#define SMU_CMD_FAN_COMMAND 0x4a
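+
+/*
+ * Illustrative setpoint block built from the layout described above
+ * (values are made up and not verified against hardware): set RPM
+ * fans 0 and 1 to 2000 RPM.
+ *
+ *	u8 args[14] = {
+ *		0x00,			(RPM bank; 0x10 selects PWM)
+ *		0x03,			(fan mask: fans 0 and 1)
+ *		0x07, 0xd0,		(fan 0 setpoint: 2000)
+ *		0x07, 0xd0,		(fan 1 setpoint: 2000)
+ *		0, 0, 0, 0, 0, 0, 0, 0	(remaining fans: unused)
+ *	};
+ */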
+
+
+/*
+ * Battery access
+ *
+ * Same command number as the PMU; could it be the same syntax?
+ */
+#define SMU_CMD_BATTERY_COMMAND 0x6f
+#define SMU_CMD_GET_BATTERY_INFO 0x00
+
+/*
+ * Real time clock control
+ *
+ * This is a "mux", first data byte contains the "sub" command.
+ * The "RTC" part of the SMU controls the date, time, powerup
+ * timer, but also a PRAM
+ *
+ * Dates are in BCD format on 7 bytes:
+ * [sec] [min] [hour] [weekday] [month day] [month] [year]
+ * with month being 1 based and year minus 100
+ */
+#define SMU_CMD_RTC_COMMAND 0x8e
+#define SMU_CMD_RTC_SET_PWRUP_TIMER 0x00 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_PWRUP_TIMER 0x01 /* o: 7 bytes date */
+#define SMU_CMD_RTC_STOP_PWRUP_TIMER 0x02
+#define SMU_CMD_RTC_SET_PRAM_BYTE_ACC 0x20 /* i: 1 byte (address?) */
+#define SMU_CMD_RTC_SET_PRAM_AUTOINC 0x21 /* i: 1 byte (data?) */
+#define SMU_CMD_RTC_SET_PRAM_LO_BYTES 0x22 /* i: 10 bytes */
+#define SMU_CMD_RTC_SET_PRAM_HI_BYTES 0x23 /* i: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_BYTE 0x28 /* i: 1 bytes (address?) */
+#define SMU_CMD_RTC_GET_PRAM_LO_BYTES 0x29 /* o: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_HI_BYTES 0x2a /* o: 10 bytes */
+#define SMU_CMD_RTC_SET_DATETIME 0x80 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_DATETIME 0x81 /* o: 7 bytes date */
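+
+/*
+ * Illustrative date encoding per the format above (the weekday
+ * numbering is an assumption, and "year minus 100" is read here as
+ * years-since-1900 minus 100): 2005-06-04 18:49:45 would be sent to
+ * SMU_CMD_RTC_SET_DATETIME as the BCD bytes
+ *
+ *	0x45  0x49  0x18  <wday>  0x04  0x06  0x05
+ *	sec   min   hour  wday    mday  month year
+ */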
+
+ /*
+ * i2c commands
+ *
+ * To issue an i2c command, first send a parameter block to
+ * the SMU. This is a command of type 0x9a with 9 bytes of header,
+ * optionally followed by data for a write:
+ *
+ * 0: bus number (from the device-tree usually; the SMU has lots of busses!)
+ * 1: transfer type/format (see below)
+ * 2: device address. For combined and combined4 type transfers, this
+ * is the "write" version of the address (bit 0x01 cleared)
+ * 3: subaddress length (0..3)
+ * 4: subaddress byte 0 (or only byte for subaddress length 1)
+ * 5: subaddress byte 1
+ * 6: subaddress byte 2
+ * 7: combined address (device address for combined mode data phase)
+ * 8: data length
+ *
+ * The transfer types are the same good old Apple ones it seems,
+ * that is:
+ * - 0x00: Simple transfer
+ * - 0x01: Subaddress transfer (addr write + data tx, no restart)
+ * - 0x02: Combined transfer (addr write + restart + data tx)
+ *
+ * This is then followed by actual data for a write.
+ *
+ * At this point, the OF driver seems to have a limitation on transfer
+ * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
+ * whether this is just an OF limit due to some temporary buffer size
+ * or if this is an SMU-imposed limit. This driver has the same limitation
+ * for now, as I use a 0x10-byte temporary buffer as well.
+ *
+ * Once that is completed, a response is expected from the SMU. This is
+ * obtained via a command of type 0x9a with a length of 1 byte containing
+ * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's
+ * though I can't tell yet if this is actually necessary. Once this command
+ * is complete, all I can tell is what OF does. OF tests
+ * byte 0 of the reply:
+ * - on read, 0xfe or 0xfc: bus is busy, wait (see below) or nak?
+ * - on read, 0x00 or 0x01: reply is in buffer (after byte 0)
+ * - on write, < 0 -> failure (immediate exit)
+ * - else, OF just exits (without error, weird)
+ *
+ * So on read, there is this wait-for-busy thing when getting a 0xfc or
+ * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and
+ * doing the above again until either the retries expire or the result
+ * is no longer 0xfe or 0xfc
+ *
+ * The Darwin I2C driver is less subtle though. On any non-success status
+ * from the response command, it waits 5ms and tries again up to 20 times;
+ * it doesn't differentiate between fatal errors and "busy" status.
+ *
+ * This driver provides an asynchronous paramblock based i2c command
+ * interface to be used either directly by low level code or by a higher
+ * level driver interfacing to the linux i2c layer. The current
+ * implementation of this relies on working timers & timer interrupts
+ * though, so be careful of calling context for now. This may be "fixed"
+ * in the future by adding a polling facility.
+ */
+#define SMU_CMD_I2C_COMMAND 0x9a
+ /* transfer types */
+#define SMU_I2C_TRANSFER_SIMPLE 0x00
+#define SMU_I2C_TRANSFER_STDSUB 0x01
+#define SMU_I2C_TRANSFER_COMBINED 0x02
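+
+/*
+ * Illustrative parameter block for the 9-byte header described above
+ * (the addresses and sizes are made up): a combined-mode read of 2
+ * bytes from subaddress 0x00 of a device whose write address is 0xd2
+ * on bus 2.
+ *
+ *	u8 pb[9] = {
+ *		0x02,				(bus number)
+ *		SMU_I2C_TRANSFER_COMBINED,	(transfer type)
+ *		0xd2,				(device addr, write form)
+ *		0x01,				(subaddress length)
+ *		0x00, 0x00, 0x00,		(subaddress bytes)
+ *		0xd3,				(combined/data-phase addr)
+ *		0x02,				(data length)
+ *	};
+ */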
+
+/*
+ * Power supply control
+ *
+ * The "sub" command is an ASCII string in the data, the
+ * data length is that of the string.
+ *
+ * The VSLEW command can be used to get or set the voltage slewing.
+ * - length 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
+ * reply at data offset 6, 7 and 8.
+ * - length 8 ("VSLEWxyz") has 3 additional bytes appended, and is
+ * used to set the voltage slewing point. The SMU replies with "DONE"
+ * I have yet to figure out the exact meaning of those 3 bytes in
+ * both cases. They seem to be:
+ * x = processor mask
+ * y = op. point index
+ * z = processor freq. step index
+ * I haven't yet deciphered the result codes.
+ *
+ */
+#define SMU_CMD_POWER_COMMAND 0xaa
+#define SMU_CMD_POWER_RESTART "RESTART"
+#define SMU_CMD_POWER_SHUTDOWN "SHUTDOWN"
+#define SMU_CMD_POWER_VOLTAGE_SLEW "VSLEW"
+
+/*
+ * Read ADC sensors
+ *
+ * This command takes one byte of parameter: the sensor ID (or "reg"
+ * value in the device-tree) and returns a 16-bit value.
+ */
+#define SMU_CMD_READ_ADC 0xd8
+
+
+/* Misc commands
+ *
+ * This command seems to be a grab bag of various things.
+ *
+ * Parameters:
+ * 1: subcommand
+ */
+#define SMU_CMD_MISC_df_COMMAND 0xdf
+
+/*
+ * Sets "system ready" status
+ *
+ * I do not yet understand exactly how it works or what it does.
+ *
+ * Guessing from OF code, 0x02 activates the display backlight. Apple uses/used
+ * the same codebase for all OF versions. On PowerBooks, this command would
+ * enable the backlight. For the G5s, it only activates the front LED. However,
+ * don't take this for granted.
+ *
+ * Parameters:
+ * 2: status [0x00, 0x01 or 0x02]
+ */
+#define SMU_CMD_MISC_df_SET_DISPLAY_LIT 0x02
+
+/*
+ * Sets mode of power switch.
+ *
+ * What this actually does is not yet known. Maybe it enables some interrupt.
+ *
+ * Parameters:
+ * 2: enable power switch? [0x00 or 0x01]
+ * 3 (optional): enable nmi? [0x00 or 0x01]
+ *
+ * Returns:
+ * If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
+ * NMI is enabled. Otherwise unknown.
+ */
+#define SMU_CMD_MISC_df_NMI_OPTION 0x04
+
+/* Sets LED dim offset.
+ *
+ * The front LED dims itself during sleep. Its brightness (or, well, the PWM
+ * frequency) depends on the current time. Therefore, the SMU needs to know
+ * the timezone.
+ *
+ * Parameters:
+ * 2-8: unknown (BCD coding)
+ */
+#define SMU_CMD_MISC_df_DIMM_OFFSET 0x99
+
+
+/*
+ * Version info commands
+ *
+ * Parameters:
+ * 1 (optional): Specifies version part to retrieve
+ *
+ * Returns:
+ * Version value
+ */
+#define SMU_CMD_VERSION_COMMAND 0xea
+#define SMU_VERSION_RUNNING 0x00
+#define SMU_VERSION_BASE 0x01
+#define SMU_VERSION_UPDATE 0x02
+
+
+/*
+ * Switches
+ *
+ * These are switches whose status seems to be known to the SMU.
+ *
+ * Parameters:
+ * none
+ *
+ * Result:
+ * Switch bits (ORed, see below)
+ */
+#define SMU_CMD_SWITCHES 0xdc
+
+/* Switches bits */
+#define SMU_SWITCH_CASE_CLOSED 0x01
+#define SMU_SWITCH_AC_POWER 0x04
+#define SMU_SWITCH_POWER_SWITCH 0x08
+
+
+/*
+ * Misc commands
+ *
+ * This command seems to be a grab bag of various things
+ *
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC is used, among others, to
+ * transfer blocks of data from the SMU. So far, I've deciphered its
+ * usage to retrieve partition data. In order to do that, you have to
+ * break your transfer into "chunks" since that command cannot transfer
+ * more than a chunk at a time. The chunk size used by OF is 0xe bytes,
+ * but it seems that the darwin driver will let you do 0x1e bytes if
+ * your "PMU" version is >= 0x30. You can get the "PMU" version apparently
+ * either in the last 16 bits of property "smu-version-pmu" or as the 16
+ * bytes at offset 1 of "smu-version-info"
+ *
+ * For each chunk, the command takes 7 bytes of arguments:
+ * byte 0: subcommand code (0x02)
+ * byte 1: 0x04 (always, I don't know what it means, maybe the address
+ * space to use or some other nicety. It's hard coded in OF)
+ * byte 2..5: SMU address of the chunk (big endian 32 bits)
+ * byte 6: size to transfer (up to max chunk size)
+ *
+ * The data is returned directly
+ */
+#define SMU_CMD_MISC_ee_COMMAND 0xee
+#define SMU_CMD_MISC_ee_GET_DATABLOCK_REC 0x02
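+
+/*
+ * Example (a minimal sketch of the argument packing for one chunk;
+ * "addr" and "clen" are the caller's SMU address and chunk length):
+ *
+ *	u8 params[7];
+ *
+ *	params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC;
+ *	params[1] = 0x04;
+ *	*((u32 *)&params[2]) = addr;
+ *	params[6] = clen;
+ *
+ * The 32-bit address lands big endian by virtue of running on a
+ * big-endian CPU. Submit params as the 7 data bytes of an
+ * SMU_CMD_MISC_ee_COMMAND and read back clen bytes of reply.
+ */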
+
+/* Retrieves currently used watts.
+ *
+ * Parameters:
+ * 1: 0x03 (Meaning unknown)
+ */
+#define SMU_CMD_MISC_ee_GET_WATTS 0x03
+
+#define SMU_CMD_MISC_ee_LEDS_CTRL 0x04 /* i: 00 (00,01) [00] */
+#define SMU_CMD_MISC_ee_GET_DATA 0x05 /* i: 00 , o: ?? */
+
+
+/*
+ * Power related commands
+ *
+ * Parameters:
+ * 1: subcommand
+ */
+#define SMU_CMD_POWER_EVENTS_COMMAND 0x8f
+
+/* SMU_POWER_EVENTS subcommands */
+enum {
+ SMU_PWR_GET_POWERUP_EVENTS = 0x00,
+ SMU_PWR_SET_POWERUP_EVENTS = 0x01,
+ SMU_PWR_CLR_POWERUP_EVENTS = 0x02,
+ SMU_PWR_GET_WAKEUP_EVENTS = 0x03,
+ SMU_PWR_SET_WAKEUP_EVENTS = 0x04,
+ SMU_PWR_CLR_WAKEUP_EVENTS = 0x05,
+
+ /*
+ * Get last shutdown cause
+ *
+ * Returns:
+ * 1 byte (signed char): Last shutdown cause. Exact meaning unknown.
+ */
+ SMU_PWR_LAST_SHUTDOWN_CAUSE = 0x07,
+
+ /*
+ * Sets or gets server ID. Meaning or use is unknown.
+ *
+ * Parameters:
+ * 2 (optional): Set server ID (1 byte)
+ *
+ * Returns:
+ * 1 byte (server ID?)
+ */
+ SMU_PWR_SERVER_ID = 0x08,
+};
+
+/* Power events wakeup bits */
+enum {
+ SMU_PWR_WAKEUP_KEY = 0x01, /* Wake on key press */
+ SMU_PWR_WAKEUP_AC_INSERT = 0x02, /* Wake on AC adapter plug */
+ SMU_PWR_WAKEUP_AC_CHANGE = 0x04,
+ SMU_PWR_WAKEUP_LID_OPEN = 0x08,
+ SMU_PWR_WAKEUP_RING = 0x10,
+};
+
+
+/*
+ * - Kernel side interface -
+ */
+
+#ifdef __KERNEL__
+
+/*
+ * Asynchronous SMU commands
+ *
+ * Fill up this structure and submit it via smu_queue_cmd(),
+ * and get notified by the optional done() callback, or because
+ * status becomes != 1
+ */
+
+struct smu_cmd;
+
+struct smu_cmd
+{
+ /* public */
+ u8 cmd; /* command */
+ int data_len; /* data len */
+ int reply_len; /* reply len */
+ void *data_buf; /* data buffer */
+ void *reply_buf; /* reply buffer */
+ int status; /* command status */
+ void (*done)(struct smu_cmd *cmd, void *misc);
+ void *misc;
+
+ /* private */
+ struct list_head link;
+};
+
+/*
+ * Queues an SMU command; all fields have to be initialized
+ */
+extern int smu_queue_cmd(struct smu_cmd *cmd);
+
+/*
+ * Simple command wrapper. This structure embeds a small buffer
+ * to ease sending simple SMU commands from the stack
+ */
+struct smu_simple_cmd
+{
+ struct smu_cmd cmd;
+ u8 buffer[16];
+};
+
+/*
+ * Queues a simple command. All fields will be initialized by that
+ * function
+ */
+extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
+ unsigned int data_len,
+ void (*done)(struct smu_cmd *cmd, void *misc),
+ void *misc,
+ ...);
+
+/*
+ * Completion helper. Pass it to smu_queue_simple or as 'done'
+ * member to smu_queue_cmd, it will call complete() on the struct
+ * completion passed in the "misc" argument
+ */
+extern void smu_done_complete(struct smu_cmd *cmd, void *misc);
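+
+/*
+ * Example (a minimal sketch of the typical completion-based pattern;
+ * error handling trimmed):
+ *
+ *	struct smu_simple_cmd cmd;
+ *	DECLARE_COMPLETION_ONSTACK(comp);
+ *	int rc;
+ *
+ *	rc = smu_queue_simple(&cmd, SMU_CMD_READ_ADC, 1,
+ *			      smu_done_complete, &comp, id);
+ *	if (rc == 0) {
+ *		wait_for_completion(&comp);
+ *		rc = cmd.cmd.status;
+ *	}
+ */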
+
+/*
+ * Synchronous helpers. Will spin-wait for completion of a command
+ */
+extern void smu_spinwait_cmd(struct smu_cmd *cmd);
+
+static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd)
+{
+ smu_spinwait_cmd(&scmd->cmd);
+}
+
+/*
+ * Poll routine to call if blocked with irqs off
+ */
+extern void smu_poll(void);
+
+
+/*
+ * Init routine, presence check....
+ */
+int __init smu_init(void);
+extern int smu_present(void);
+struct platform_device;
+extern struct platform_device *smu_get_ofdev(void);
+
+
+/*
+ * Common command wrappers
+ */
+extern void smu_shutdown(void);
+extern void smu_restart(void);
+struct rtc_time;
+extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
+extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
+
+/*
+ * Kernel asynchronous i2c interface
+ */
+
+#define SMU_I2C_READ_MAX 0x1d
+#define SMU_I2C_WRITE_MAX 0x15
+
+/* SMU i2c header, exactly matches i2c header on wire */
+struct smu_i2c_param
+{
+ u8 bus; /* SMU bus ID (from device tree) */
+ u8 type; /* i2c transfer type */
+ u8 devaddr; /* device address (includes direction) */
+ u8 sublen; /* subaddress length */
+ u8 subaddr[3]; /* subaddress */
+ u8 caddr; /* combined address, filled by SMU driver */
+ u8 datalen; /* length of transfer */
+ u8 data[SMU_I2C_READ_MAX]; /* data */
+};
+
+struct smu_i2c_cmd
+{
+ /* public */
+ struct smu_i2c_param info;
+ void (*done)(struct smu_i2c_cmd *cmd, void *misc);
+ void *misc;
+ int status; /* 1 = pending, 0 = ok, <0 = fail */
+
+ /* private */
+ struct smu_cmd scmd;
+ int read;
+ int stage;
+ int retries;
+ u8 pdata[32];
+ struct list_head link;
+};
+
+/*
+ * Call this to queue an i2c command to the SMU. You must fill in info
+ * (including info.data for a write), done and misc.
+ * For now, no polling interface is provided, so you have to use the
+ * completion callback.
+ */
+extern int smu_queue_i2c(struct smu_i2c_cmd *cmd);
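+
+/*
+ * Example (a minimal sketch of a combined-transfer register read;
+ * packing the direction into the low bit of devaddr is an assumption
+ * here, check the actual bus users before relying on it):
+ *
+ *	struct smu_i2c_cmd cmd;
+ *
+ *	cmd.info.bus = bus_id;
+ *	cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
+ *	cmd.info.devaddr = (addr << 1) | 1;
+ *	cmd.info.sublen = 1;
+ *	cmd.info.subaddr[0] = reg;
+ *	cmd.info.datalen = 2;
+ *	cmd.done = my_done;
+ *	cmd.misc = my_data;
+ *	rc = smu_queue_i2c(&cmd);
+ */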
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - SMU "sdb" partitions informations -
+ */
+
+
+/*
+ * Partition header format
+ */
+struct smu_sdbp_header {
+ __u8 id;
+ __u8 len;
+ __u8 version;
+ __u8 flags;
+};
+
+
+/*
+ * Demangle 16-bit and 32-bit integers in some SMU partitions
+ * (currently, afaik, this concerns only the FVT partition (0x12))
+ */
+#define SMU_U16_MIX(x) le16_to_cpu(x)
+#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
+
+
+/* This is the definition of the SMU sdb-partition-0x12 table (called
+ * CPU F/V/T operating points in Darwin). The definition for all those
+ * SMU tables should be moved to some separate file
+ */
+#define SMU_SDB_FVT_ID 0x12
+
+struct smu_sdbp_fvt {
+ __u32 sysclk; /* Base SysClk frequency in Hz for
+ * this operating point. Value needs to
+ * be unmixed with SMU_U32_MIX()
+ */
+ __u8 pad;
+ __u8 maxtemp; /* Max temp. supported by this
+ * operating point
+ */
+
+ __u16 volts[3]; /* CPU core voltage for the 3
+ * PowerTune modes, a mode with
+ * 0V = not supported. Value needs
+ * to be unmixed with SMU_U16_MIX()
+ */
+};
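+
+/*
+ * Example (a minimal sketch; "fvt" points at the partition data,
+ * see smu_get_sdb_partition() below):
+ *
+ *	u32 sysclk = SMU_U32_MIX(fvt->sysclk);
+ *	u16 vcore = SMU_U16_MIX(fvt->volts[0]);
+ */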
+
+/* This partition contains voltage & current sensor calibration
+ * information
+ */
+#define SMU_SDB_CPUVCP_ID 0x21
+
+struct smu_sdbp_cpuvcp {
+ __u16 volt_scale; /* u4.12 fixed point */
+ __s16 volt_offset; /* s4.12 fixed point */
+ __u16 curr_scale; /* u4.12 fixed point */
+ __s16 curr_offset; /* s4.12 fixed point */
+ __s32 power_quads[3]; /* s4.28 fixed point */
+};
+
+/* This partition contains CPU thermal diode calibration
+ */
+#define SMU_SDB_CPUDIODE_ID 0x18
+
+struct smu_sdbp_cpudiode {
+ __u16 m_value; /* u1.15 fixed point */
+ __s16 b_value; /* s10.6 fixed point */
+
+};
+
+/* This partition contains Slots power calibration
+ */
+#define SMU_SDB_SLOTSPOW_ID 0x78
+
+struct smu_sdbp_slotspow {
+ __u16 pow_scale; /* u4.12 fixed point */
+ __s16 pow_offset; /* s4.12 fixed point */
+};
+
+/* This partition contains machine-specific version information about
+ * the sensor/control layout
+ */
+#define SMU_SDB_SENSORTREE_ID 0x25
+
+struct smu_sdbp_sensortree {
+ __u8 model_id;
+ __u8 unknown[3];
+};
+
+/* This partition contains CPU thermal control PID information. So far,
+ * only single-CPU machines have been seen with an SMU, so we assume this
+ * carries only information for those
+ */
+#define SMU_SDB_CPUPIDDATA_ID 0x17
+
+struct smu_sdbp_cpupiddata {
+ __u8 unknown1;
+ __u8 target_temp_delta;
+ __u8 unknown2;
+ __u8 history_len;
+ __s16 power_adj;
+ __u16 max_power;
+ __s32 gp,gr,gd;
+};
+
+
+/* Other partitions without known structures */
+#define SMU_SDB_DEBUG_SWITCHES_ID 0x05
+
+#ifdef __KERNEL__
+/*
+ * This returns a pointer to an SMU "sdb" partition's data, or NULL
+ * if not found. The data format is described above
+ */
+extern const struct smu_sdbp_header *smu_get_sdb_partition(int id,
+ unsigned int *size);
+
+/* Get "sdb" partition data from an SMU satellite */
+extern struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id,
+ int id, unsigned int *size);
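+
+/*
+ * Example (a minimal sketch; the partition data immediately follows
+ * the header):
+ *
+ *	const struct smu_sdbp_header *hdr;
+ *	const struct smu_sdbp_fvt *fvt;
+ *
+ *	hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ *	if (hdr != NULL)
+ *		fvt = (const struct smu_sdbp_fvt *)&hdr[1];
+ */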
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - Userland interface -
+ */
+
+/*
+ * A given instance of the device can be configured for 2 different
+ * things at the moment:
+ *
+ * - sending SMU commands (default at open() time)
+ * - receiving SMU events (not yet implemented)
+ *
+ * Commands are written with write() of a command block. They can be
+ * "driver" commands (for example to switch to event reception mode)
+ * or real SMU commands. They are made of a header followed by command
+ * data if any.
+ *
+ * For SMU commands (not for driver commands), you can then read() back
+ * a reply. The reader will be blocked or not depending on how the device
+ * file is opened. poll() isn't implemented yet. The reply will consist
+ * of a header as well, followed by the reply data if any. You should
+ * always provide a buffer large enough for the maximum reply data; I
+ * recommend one page.
+ *
+ * It is illegal to send SMU commands through a file descriptor configured
+ * for event reception
+ *
+ */
+struct smu_user_cmd_hdr
+{
+ __u32 cmdtype;
+#define SMU_CMDTYPE_SMU 0 /* SMU command */
+#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */
+#define SMU_CMDTYPE_GET_PARTITION 2 /* retrieve an sdb partition */
+
+ __u8 cmd; /* SMU command byte */
+ __u8 pad[3]; /* padding */
+ __u32 data_len; /* Length of data following */
+};
+
+struct smu_user_reply_hdr
+{
+ __u32 status; /* Command status */
+ __u32 reply_len; /* Length of data following */
+};
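+
+/*
+ * Example (a minimal sketch of a userland caller; the /dev/smu device
+ * node name and the choice of SMU_CMD_READ_ADC here are assumptions):
+ *
+ *	struct smu_user_cmd_hdr hdr = {
+ *		.cmdtype = SMU_CMDTYPE_SMU,
+ *		.cmd = SMU_CMD_READ_ADC,
+ *		.data_len = 1,
+ *	};
+ *	unsigned char out[sizeof(hdr) + 1], in[4096];
+ *
+ *	memcpy(out, &hdr, sizeof(hdr));
+ *	out[sizeof(hdr)] = sensor_id;
+ *	write(fd, out, sizeof(out));
+ *	read(fd, in, sizeof(in));
+ */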
+
+#endif /* _SMU_H */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
new file mode 100644
index 000000000..d07286684
--- /dev/null
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 24
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int remove_section_mapping(unsigned long start, unsigned long end);
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+
+#ifdef CONFIG_NUMA
+extern int hot_add_scn_to_nid(unsigned long scn_addr);
+#else
+static inline int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
new file mode 100644
index 000000000..bd75872a6
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+#else
+#include <asm/simple_spinlock.h>
+#endif
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void pv_spinlocks_init(void) { }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
new file mode 100644
index 000000000..d5f8a74ed
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "please don't include this file directly"
+#endif
+
+#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+#else
+#include <asm/simple_spinlock_types.h>
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
new file mode 100644
index 000000000..96ad4510c
--- /dev/null
+++ b/arch/powerpc/include/asm/spu.h
@@ -0,0 +1,679 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SPU core / file system interface and HW structures
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+#ifdef __KERNEL__
+
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/reg.h>
+#include <asm/copro.h>
+
+#define LS_SIZE (256 * 1024)
+#define LS_ADDR_MASK (LS_SIZE - 1)
+
+#define MFC_PUT_CMD 0x20
+#define MFC_PUTS_CMD 0x28
+#define MFC_PUTR_CMD 0x30
+#define MFC_PUTF_CMD 0x22
+#define MFC_PUTB_CMD 0x21
+#define MFC_PUTFS_CMD 0x2A
+#define MFC_PUTBS_CMD 0x29
+#define MFC_PUTRF_CMD 0x32
+#define MFC_PUTRB_CMD 0x31
+#define MFC_PUTL_CMD 0x24
+#define MFC_PUTRL_CMD 0x34
+#define MFC_PUTLF_CMD 0x26
+#define MFC_PUTLB_CMD 0x25
+#define MFC_PUTRLF_CMD 0x36
+#define MFC_PUTRLB_CMD 0x35
+
+#define MFC_GET_CMD 0x40
+#define MFC_GETS_CMD 0x48
+#define MFC_GETF_CMD 0x42
+#define MFC_GETB_CMD 0x41
+#define MFC_GETFS_CMD 0x4A
+#define MFC_GETBS_CMD 0x49
+#define MFC_GETL_CMD 0x44
+#define MFC_GETLF_CMD 0x46
+#define MFC_GETLB_CMD 0x45
+
+#define MFC_SDCRT_CMD 0x80
+#define MFC_SDCRTST_CMD 0x81
+#define MFC_SDCRZ_CMD 0x89
+#define MFC_SDCRS_CMD 0x8D
+#define MFC_SDCRF_CMD 0x8F
+
+#define MFC_GETLLAR_CMD 0xD0
+#define MFC_PUTLLC_CMD 0xB4
+#define MFC_PUTLLUC_CMD 0xB0
+#define MFC_PUTQLLUC_CMD 0xB8
+#define MFC_SNDSIG_CMD 0xA0
+#define MFC_SNDSIGB_CMD 0xA1
+#define MFC_SNDSIGF_CMD 0xA2
+#define MFC_BARRIER_CMD 0xC0
+#define MFC_EIEIO_CMD 0xC8
+#define MFC_SYNC_CMD 0xCC
+
+#define MFC_MIN_DMA_SIZE_SHIFT 4 /* 16 bytes */
+#define MFC_MAX_DMA_SIZE_SHIFT 14 /* 16384 bytes */
+#define MFC_MIN_DMA_SIZE (1 << MFC_MIN_DMA_SIZE_SHIFT)
+#define MFC_MAX_DMA_SIZE (1 << MFC_MAX_DMA_SIZE_SHIFT)
+#define MFC_MIN_DMA_SIZE_MASK (MFC_MIN_DMA_SIZE - 1)
+#define MFC_MAX_DMA_SIZE_MASK (MFC_MAX_DMA_SIZE - 1)
+#define MFC_MIN_DMA_LIST_SIZE 0x0008 /* 8 bytes */
+#define MFC_MAX_DMA_LIST_SIZE 0x4000 /* 16K bytes */
+
+#define MFC_TAGID_TO_TAGMASK(tag_id) (1 << (tag_id & 0x1F))
+
+/* Events for Channels 0-2 */
+#define MFC_DMA_TAG_STATUS_UPDATE_EVENT 0x00000001
+#define MFC_DMA_TAG_CMD_STALL_NOTIFY_EVENT 0x00000002
+#define MFC_DMA_QUEUE_AVAILABLE_EVENT 0x00000008
+#define MFC_SPU_MAILBOX_WRITTEN_EVENT 0x00000010
+#define MFC_DECREMENTER_EVENT 0x00000020
+#define MFC_PU_INT_MAILBOX_AVAILABLE_EVENT 0x00000040
+#define MFC_PU_MAILBOX_AVAILABLE_EVENT 0x00000080
+#define MFC_SIGNAL_2_EVENT 0x00000100
+#define MFC_SIGNAL_1_EVENT 0x00000200
+#define MFC_LLR_LOST_EVENT 0x00000400
+#define MFC_PRIV_ATTN_EVENT 0x00000800
+#define MFC_MULTI_SRC_EVENT 0x00001000
+
+/* Flags indicating progress during context switch. */
+#define SPU_CONTEXT_SWITCH_PENDING 0UL
+#define SPU_CONTEXT_FAULT_PENDING 1UL
+
+struct spu_context;
+struct spu_runqueue;
+struct spu_lscsa;
+struct device_node;
+
+enum spu_utilization_state {
+ SPU_UTIL_USER,
+ SPU_UTIL_SYSTEM,
+ SPU_UTIL_IOWAIT,
+ SPU_UTIL_IDLE_LOADED,
+ SPU_UTIL_MAX
+};
+
+struct spu {
+ const char *name;
+ unsigned long local_store_phys;
+ u8 *local_store;
+ unsigned long problem_phys;
+ struct spu_problem __iomem *problem;
+ struct spu_priv2 __iomem *priv2;
+ struct list_head cbe_list;
+ struct list_head full_list;
+ enum { SPU_FREE, SPU_USED } alloc_state;
+ int number;
+ unsigned int irqs[3];
+ u32 node;
+ unsigned long flags;
+ u64 class_0_pending;
+ u64 class_0_dar;
+ u64 class_1_dar;
+ u64 class_1_dsisr;
+ size_t ls_size;
+ unsigned int slb_replace;
+ struct mm_struct *mm;
+ struct spu_context *ctx;
+ struct spu_runqueue *rq;
+ unsigned long long timestamp;
+ pid_t pid;
+ pid_t tgid;
+ spinlock_t register_lock;
+
+ void (* wbox_callback)(struct spu *spu);
+ void (* ibox_callback)(struct spu *spu);
+ void (* stop_callback)(struct spu *spu, int irq);
+ void (* mfc_callback)(struct spu *spu);
+
+ char irq_c0[8];
+ char irq_c1[8];
+ char irq_c2[8];
+
+ u64 spe_id;
+
+ void* pdata; /* platform private data */
+
+ /* of based platforms only */
+ struct device_node *devnode;
+
+ /* native only */
+ struct spu_priv1 __iomem *priv1;
+
+ /* beat only */
+ u64 shadow_int_mask_RW[3];
+
+ struct device dev;
+
+ int has_mem_affinity;
+ struct list_head aff_list;
+
+ struct {
+ /* protected by interrupt reentrancy */
+ enum spu_utilization_state util_state;
+ unsigned long long tstamp;
+ unsigned long long times[SPU_UTIL_MAX];
+ unsigned long long vol_ctx_switch;
+ unsigned long long invol_ctx_switch;
+ unsigned long long min_flt;
+ unsigned long long maj_flt;
+ unsigned long long hash_flt;
+ unsigned long long slb_flt;
+ unsigned long long class2_intr;
+ unsigned long long libassist;
+ } stats;
+};
+
+struct cbe_spu_info {
+ struct mutex list_mutex;
+ struct list_head spus;
+ int n_spus;
+ int nr_active;
+ atomic_t busy_spus;
+ atomic_t reserved_spus;
+};
+
+extern struct cbe_spu_info cbe_spu_info[];
+
+void spu_init_channels(struct spu *spu);
+void spu_irq_setaffinity(struct spu *spu, int cpu);
+
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+ void *code, int code_size);
+
+extern void spu_invalidate_slbs(struct spu *spu);
+extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
+int spu_64k_pages_available(void);
+
+/* Calls from the memory management to the SPU */
+struct mm_struct;
+extern void spu_flush_all_slbs(struct mm_struct *mm);
+
+/* system callbacks from the SPU */
+struct spu_syscall_block {
+ u64 nr_ret;
+ u64 parm[6];
+};
+extern long spu_sys_callback(struct spu_syscall_block *s);
+
+/* syscalls implemented in spufs */
+struct file;
+struct coredump_params;
+struct spufs_calls {
+ long (*create_thread)(const char __user *name,
+ unsigned int flags, umode_t mode,
+ struct file *neighbor);
+ long (*spu_run)(struct file *filp, __u32 __user *unpc,
+ __u32 __user *ustatus);
+ int (*coredump_extra_notes_size)(void);
+ int (*coredump_extra_notes_write)(struct coredump_params *cprm);
+ void (*notify_spus_active)(void);
+ struct module *owner;
+};
+
+/* return status from spu_run, same as in libspe */
+#define SPE_EVENT_DMA_ALIGNMENT 0x0008 /* A DMA alignment error */
+#define SPE_EVENT_SPE_ERROR 0x0010 /* An illegal instruction error */
+#define SPE_EVENT_SPE_DATA_SEGMENT 0x0020 /* A DMA segmentation error */
+#define SPE_EVENT_SPE_DATA_STORAGE 0x0040 /* A DMA storage error */
+#define SPE_EVENT_INVALID_DMA 0x0800 /* Invalid MFC DMA */
+
+/*
+ * Flags for sys_spu_create.
+ */
+#define SPU_CREATE_EVENTS_ENABLED 0x0001
+#define SPU_CREATE_GANG 0x0002
+#define SPU_CREATE_NOSCHED 0x0004
+#define SPU_CREATE_ISOLATE 0x0008
+#define SPU_CREATE_AFFINITY_SPU 0x0010
+#define SPU_CREATE_AFFINITY_MEM 0x0020
+
+#define SPU_CREATE_FLAG_ALL 0x003f /* mask of all valid flags */
+
+
+int register_spu_syscalls(struct spufs_calls *calls);
+void unregister_spu_syscalls(struct spufs_calls *calls);
+
+int spu_add_dev_attr(struct device_attribute *attr);
+void spu_remove_dev_attr(struct device_attribute *attr);
+
+int spu_add_dev_attr_group(const struct attribute_group *attrs);
+void spu_remove_dev_attr_group(const struct attribute_group *attrs);
+
+extern void notify_spus_active(void);
+extern void do_notify_spus_active(void);
+
+/*
+ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+ */
+
+union mfc_tag_size_class_cmd {
+ struct {
+ u16 mfc_size;
+ u16 mfc_tag;
+ u8 pad;
+ u8 mfc_rclassid;
+ u16 mfc_cmd;
+ } u;
+ struct {
+ u32 mfc_size_tag32;
+ u32 mfc_class_cmd32;
+ } by32;
+ u64 all64;
+};
+
+struct mfc_cq_sr {
+ u64 mfc_cq_data0_RW;
+ u64 mfc_cq_data1_RW;
+ u64 mfc_cq_data2_RW;
+ u64 mfc_cq_data3_RW;
+};
+
+struct spu_problem {
+#define MS_SYNC_PENDING 1L
+ u64 spc_mssync_RW; /* 0x0000 */
+ u8 pad_0x0008_0x3000[0x3000 - 0x0008];
+
+ /* DMA Area */
+ u8 pad_0x3000_0x3004[0x4]; /* 0x3000 */
+ u32 mfc_lsa_W; /* 0x3004 */
+ u64 mfc_ea_W; /* 0x3008 */
+ union mfc_tag_size_class_cmd mfc_union_W; /* 0x3010 */
+ u8 pad_0x3018_0x3104[0xec]; /* 0x3018 */
+ u32 dma_qstatus_R; /* 0x3104 */
+ u8 pad_0x3108_0x3204[0xfc]; /* 0x3108 */
+ u32 dma_querytype_RW; /* 0x3204 */
+ u8 pad_0x3208_0x321c[0x14]; /* 0x3208 */
+ u32 dma_querymask_RW; /* 0x321c */
+ u8 pad_0x3220_0x322c[0xc]; /* 0x3220 */
+ u32 dma_tagstatus_R; /* 0x322c */
+#define DMA_TAGSTATUS_INTR_ANY 1u
+#define DMA_TAGSTATUS_INTR_ALL 2u
+ u8 pad_0x3230_0x4000[0x4000 - 0x3230]; /* 0x3230 */
+
+ /* SPU Control Area */
+ u8 pad_0x4000_0x4004[0x4]; /* 0x4000 */
+ u32 pu_mb_R; /* 0x4004 */
+ u8 pad_0x4008_0x400c[0x4]; /* 0x4008 */
+ u32 spu_mb_W; /* 0x400c */
+ u8 pad_0x4010_0x4014[0x4]; /* 0x4010 */
+ u32 mb_stat_R; /* 0x4014 */
+ u8 pad_0x4018_0x401c[0x4]; /* 0x4018 */
+ u32 spu_runcntl_RW; /* 0x401c */
+#define SPU_RUNCNTL_STOP 0L
+#define SPU_RUNCNTL_RUNNABLE 1L
+#define SPU_RUNCNTL_ISOLATE 2L
+ u8 pad_0x4020_0x4024[0x4]; /* 0x4020 */
+ u32 spu_status_R; /* 0x4024 */
+#define SPU_STOP_STATUS_SHIFT 16
+#define SPU_STATUS_STOPPED 0x0
+#define SPU_STATUS_RUNNING 0x1
+#define SPU_STATUS_STOPPED_BY_STOP 0x2
+#define SPU_STATUS_STOPPED_BY_HALT 0x4
+#define SPU_STATUS_WAITING_FOR_CHANNEL 0x8
+#define SPU_STATUS_SINGLE_STEP 0x10
+#define SPU_STATUS_INVALID_INSTR 0x20
+#define SPU_STATUS_INVALID_CH 0x40
+#define SPU_STATUS_ISOLATED_STATE 0x80
+#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
+ u8 pad_0x4028_0x402c[0x4]; /* 0x4028 */
+ u32 spu_spe_R; /* 0x402c */
+ u8 pad_0x4030_0x4034[0x4]; /* 0x4030 */
+ u32 spu_npc_RW; /* 0x4034 */
+ u8 pad_0x4038_0x14000[0x14000 - 0x4038]; /* 0x4038 */
+
+ /* Signal Notification Area */
+ u8 pad_0x14000_0x1400c[0xc]; /* 0x14000 */
+ u32 signal_notify1; /* 0x1400c */
+ u8 pad_0x14010_0x1c00c[0x7ffc]; /* 0x14010 */
+ u32 signal_notify2; /* 0x1c00c */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 2 State Area */
+struct spu_priv2 {
+ /* MFC Registers */
+ u8 pad_0x0000_0x1100[0x1100 - 0x0000]; /* 0x0000 */
+
+ /* SLB Management Registers */
+ u8 pad_0x1100_0x1108[0x8]; /* 0x1100 */
+ u64 slb_index_W; /* 0x1108 */
+#define SLB_INDEX_MASK 0x7L
+ u64 slb_esid_RW; /* 0x1110 */
+ u64 slb_vsid_RW; /* 0x1118 */
+#define SLB_VSID_SUPERVISOR_STATE (0x1ull << 11)
+#define SLB_VSID_SUPERVISOR_STATE_MASK (0x1ull << 11)
+#define SLB_VSID_PROBLEM_STATE (0x1ull << 10)
+#define SLB_VSID_PROBLEM_STATE_MASK (0x1ull << 10)
+#define SLB_VSID_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_NO_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_EXECUTE_SEGMENT_MASK (0x1ull << 9)
+#define SLB_VSID_4K_PAGE (0x0 << 8)
+#define SLB_VSID_LARGE_PAGE (0x1ull << 8)
+#define SLB_VSID_PAGE_SIZE_MASK (0x1ull << 8)
+#define SLB_VSID_CLASS_MASK (0x1ull << 7)
+#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK (0x1ull << 6)
+ u64 slb_invalidate_entry_W; /* 0x1120 */
+ u64 slb_invalidate_all_W; /* 0x1128 */
+ u8 pad_0x1130_0x2000[0x2000 - 0x1130]; /* 0x1130 */
+
+ /* Context Save / Restore Area */
+ struct mfc_cq_sr spuq[16]; /* 0x2000 */
+ struct mfc_cq_sr puq[8]; /* 0x2200 */
+ u8 pad_0x2300_0x3000[0x3000 - 0x2300]; /* 0x2300 */
+
+ /* MFC Control */
+ u64 mfc_control_RW; /* 0x3000 */
+#define MFC_CNTL_RESUME_DMA_QUEUE (0ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE (1ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK (1ull << 0)
+#define MFC_CNTL_SUSPEND_MASK (1ull << 4)
+#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION (0ull << 8)
+#define MFC_CNTL_SUSPEND_IN_PROGRESS (1ull << 8)
+#define MFC_CNTL_SUSPEND_COMPLETE (3ull << 8)
+#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK (3ull << 8)
+#define MFC_CNTL_DMA_QUEUES_EMPTY (1ull << 14)
+#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK (1ull << 14)
+#define MFC_CNTL_PURGE_DMA_REQUEST (1ull << 15)
+#define MFC_CNTL_PURGE_DMA_IN_PROGRESS (1ull << 24)
+#define MFC_CNTL_PURGE_DMA_COMPLETE (3ull << 24)
+#define MFC_CNTL_PURGE_DMA_STATUS_MASK (3ull << 24)
+#define MFC_CNTL_RESTART_DMA_COMMAND (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
+#define MFC_CNTL_MFC_PRIVILEGE_STATE (2ull << 33)
+#define MFC_CNTL_MFC_PROBLEM_STATE (3ull << 33)
+#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK (3ull << 33)
+#define MFC_CNTL_DECREMENTER_HALTED (1ull << 35)
+#define MFC_CNTL_DECREMENTER_RUNNING (1ull << 40)
+#define MFC_CNTL_DECREMENTER_STATUS_MASK (1ull << 40)
+ u8 pad_0x3008_0x4000[0x4000 - 0x3008]; /* 0x3008 */
+
+ /* Interrupt Mailbox */
+ u64 puint_mb_R; /* 0x4000 */
+ u8 pad_0x4008_0x4040[0x4040 - 0x4008]; /* 0x4008 */
+
+ /* SPU Control */
+ u64 spu_privcntl_RW; /* 0x4040 */
+#define SPU_PRIVCNTL_MODE_NORMAL (0x0ull << 0)
+#define SPU_PRIVCNTL_MODE_SINGLE_STEP (0x1ull << 0)
+#define SPU_PRIVCNTL_MODE_MASK (0x1ull << 0)
+#define SPU_PRIVCNTL_NO_ATTENTION_EVENT (0x0ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT (0x1ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK (0x1ull << 1)
+#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL (0x0ull << 2)
+#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK (0x1ull << 2)
+ u8 pad_0x4048_0x4058[0x10]; /* 0x4048 */
+ u64 spu_lslr_RW; /* 0x4058 */
+ u64 spu_chnlcntptr_RW; /* 0x4060 */
+ u64 spu_chnlcnt_RW; /* 0x4068 */
+ u64 spu_chnldata_RW; /* 0x4070 */
+ u64 spu_cfg_RW; /* 0x4078 */
+ u8 pad_0x4080_0x5000[0x5000 - 0x4080]; /* 0x4080 */
+
+ /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
+ u64 spu_pm_trace_tag_status_RW; /* 0x5000 */
+ u64 spu_tag_status_query_RW; /* 0x5008 */
+#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
+#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
+ u64 spu_cmd_buf1_RW; /* 0x5010 */
+#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
+#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
+ u64 spu_cmd_buf2_RW; /* 0x5018 */
+#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
+#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
+#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
+ u64 spu_atomic_status_RW; /* 0x5020 */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 1 State Area */
+struct spu_priv1 {
+ /* Control and Configuration Area */
+ u64 mfc_sr1_RW; /* 0x000 */
+#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK 0x01ull
+#define MFC_STATE1_BUS_TLBIE_MASK 0x02ull
+#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK 0x04ull
+#define MFC_STATE1_PROBLEM_STATE_MASK 0x08ull
+#define MFC_STATE1_RELOCATE_MASK 0x10ull
+#define MFC_STATE1_MASTER_RUN_CONTROL_MASK 0x20ull
+#define MFC_STATE1_TABLE_SEARCH_MASK 0x40ull
+ u64 mfc_lpid_RW; /* 0x008 */
+ u64 spu_idr_RW; /* 0x010 */
+ u64 mfc_vr_RO; /* 0x018 */
+#define MFC_VERSION_BITS (0xffff << 16)
+#define MFC_REVISION_BITS (0xffff)
+#define MFC_GET_VERSION_BITS(vr) (((vr) & MFC_VERSION_BITS) >> 16)
+#define MFC_GET_REVISION_BITS(vr) ((vr) & MFC_REVISION_BITS)
+ u64 spu_vr_RO; /* 0x020 */
+#define SPU_VERSION_BITS (0xffff << 16)
+#define SPU_REVISION_BITS (0xffff)
+#define SPU_GET_VERSION_BITS(vr) (vr & SPU_VERSION_BITS) >> 16
+#define SPU_GET_REVISION_BITS(vr) (vr & SPU_REVISION_BITS)
+ u8 pad_0x28_0x100[0x100 - 0x28]; /* 0x28 */
+
+ /* Interrupt Area */
+ u64 int_mask_RW[3]; /* 0x100 */
+#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L
+#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L
+#define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L
+#define CLASS0_ENABLE_MFC_FIR_INTR 0x8L
+#define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L
+#define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
+#define CLASS2_ENABLE_MAILBOX_INTR 0x1L
+#define CLASS2_ENABLE_SPU_STOP_INTR 0x2L
+#define CLASS2_ENABLE_SPU_HALT_INTR 0x4L
+#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
+#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR 0x10L
+ u8 pad_0x118_0x140[0x28]; /* 0x118 */
+ u64 int_stat_RW[3]; /* 0x140 */
+#define CLASS0_DMA_ALIGNMENT_INTR 0x1L
+#define CLASS0_INVALID_DMA_COMMAND_INTR 0x2L
+#define CLASS0_SPU_ERROR_INTR 0x4L
+#define CLASS0_INTR_MASK 0x7L
+#define CLASS1_SEGMENT_FAULT_INTR 0x1L
+#define CLASS1_STORAGE_FAULT_INTR 0x2L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
+#define CLASS1_INTR_MASK 0xfL
+#define CLASS2_MAILBOX_INTR 0x1L
+#define CLASS2_SPU_STOP_INTR 0x2L
+#define CLASS2_SPU_HALT_INTR 0x4L
+#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
+#define CLASS2_MAILBOX_THRESHOLD_INTR 0x10L
+#define CLASS2_INTR_MASK 0x1fL
+ u8 pad_0x158_0x180[0x28]; /* 0x158 */
+ u64 int_route_RW; /* 0x180 */
+
+ /* Interrupt Routing */
+ u8 pad_0x188_0x200[0x200 - 0x188]; /* 0x188 */
+
+ /* Atomic Unit Control Area */
+ u64 mfc_atomic_flush_RW; /* 0x200 */
+#define mfc_atomic_flush_enable 0x1L
+ u8 pad_0x208_0x280[0x78]; /* 0x208 */
+ u64 resource_allocation_groupID_RW; /* 0x280 */
+ u64 resource_allocation_enable_RW; /* 0x288 */
+ u8 pad_0x290_0x3c8[0x3c8 - 0x290]; /* 0x290 */
+
+ /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
+
+ u64 smf_sbi_signal_sel; /* 0x3c8 */
+#define smf_sbi_mask_lsb 56
+#define smf_sbi_shift (63 - smf_sbi_mask_lsb)
+#define smf_sbi_mask (0x301LL << smf_sbi_shift)
+#define smf_sbi_bus0_bits (0x001LL << smf_sbi_shift)
+#define smf_sbi_bus2_bits (0x100LL << smf_sbi_shift)
+#define smf_sbi2_bus0_bits (0x201LL << smf_sbi_shift)
+#define smf_sbi2_bus2_bits (0x300LL << smf_sbi_shift)
+ u64 smf_ato_signal_sel; /* 0x3d0 */
+#define smf_ato_mask_lsb 35
+#define smf_ato_shift (63 - smf_ato_mask_lsb)
+#define smf_ato_mask (0x3LL << smf_ato_shift)
+#define smf_ato_bus0_bits (0x2LL << smf_ato_shift)
+#define smf_ato_bus2_bits (0x1LL << smf_ato_shift)
+ u8 pad_0x3d8_0x400[0x400 - 0x3d8]; /* 0x3d8 */
+
+ /* TLB Management Registers */
+ u64 mfc_sdr_RW; /* 0x400 */
+ u8 pad_0x408_0x500[0xf8]; /* 0x408 */
+ u64 tlb_index_hint_RO; /* 0x500 */
+ u64 tlb_index_W; /* 0x508 */
+ u64 tlb_vpn_RW; /* 0x510 */
+ u64 tlb_rpn_RW; /* 0x518 */
+ u8 pad_0x520_0x540[0x20]; /* 0x520 */
+ u64 tlb_invalidate_entry_W; /* 0x540 */
+ u64 tlb_invalidate_all_W; /* 0x548 */
+ u8 pad_0x550_0x580[0x580 - 0x550]; /* 0x550 */
+
+ /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
+ u64 smm_hid; /* 0x580 */
+#define PAGE_SIZE_MASK 0xf000000000000000ull
+#define PAGE_SIZE_16MB_64KB 0x2000000000000000ull
+ u8 pad_0x588_0x600[0x600 - 0x588]; /* 0x588 */
+
+ /* MFC Status/Control Area */
+ u64 mfc_accr_RW; /* 0x600 */
+#define MFC_ACCR_EA_ACCESS_GET (1 << 0)
+#define MFC_ACCR_EA_ACCESS_PUT (1 << 1)
+#define MFC_ACCR_LS_ACCESS_GET (1 << 3)
+#define MFC_ACCR_LS_ACCESS_PUT (1 << 4)
+ u8 pad_0x608_0x610[0x8]; /* 0x608 */
+ u64 mfc_dsisr_RW; /* 0x610 */
+#define MFC_DSISR_PTE_NOT_FOUND (1 << 30)
+#define MFC_DSISR_ACCESS_DENIED (1 << 27)
+#define MFC_DSISR_ATOMIC (1 << 26)
+#define MFC_DSISR_ACCESS_PUT (1 << 25)
+#define MFC_DSISR_ADDR_MATCH (1 << 22)
+#define MFC_DSISR_LS (1 << 17)
+#define MFC_DSISR_L (1 << 16)
+#define MFC_DSISR_ADDRESS_OVERFLOW (1 << 0)
+ u8 pad_0x618_0x620[0x8]; /* 0x618 */
+ u64 mfc_dar_RW; /* 0x620 */
+ u8 pad_0x628_0x700[0x700 - 0x628]; /* 0x628 */
+
+ /* Replacement Management Table (RMT) Area */
+ u64 rmt_index_RW; /* 0x700 */
+ u8 pad_0x708_0x710[0x8]; /* 0x708 */
+ u64 rmt_data1_RW; /* 0x710 */
+ u8 pad_0x718_0x800[0x800 - 0x718]; /* 0x718 */
+
+ /* Control/Configuration Registers */
+ u64 mfc_dsir_R; /* 0x800 */
+#define MFC_DSIR_Q (1 << 31)
+#define MFC_DSIR_SPU_QUEUE MFC_DSIR_Q
+ u64 mfc_lsacr_RW; /* 0x808 */
+#define MFC_LSACR_COMPARE_MASK ((~0ull) << 32)
+#define MFC_LSACR_COMPARE_ADDR ((~0ull) >> 32)
+ u64 mfc_lscrr_R; /* 0x810 */
+#define MFC_LSCRR_Q (1 << 31)
+#define MFC_LSCRR_SPU_QUEUE MFC_LSCRR_Q
+#define MFC_LSCRR_QI_SHIFT 32
+#define MFC_LSCRR_QI_MASK ((~0ull) << MFC_LSCRR_QI_SHIFT)
+ u8 pad_0x818_0x820[0x8]; /* 0x818 */
+ u64 mfc_tclass_id_RW; /* 0x820 */
+#define MFC_TCLASS_ID_ENABLE (1L << 0L)
+#define MFC_TCLASS_SLOT2_ENABLE (1L << 5L)
+#define MFC_TCLASS_SLOT1_ENABLE (1L << 6L)
+#define MFC_TCLASS_SLOT0_ENABLE (1L << 7L)
+#define MFC_TCLASS_QUOTA_2_SHIFT 8L
+#define MFC_TCLASS_QUOTA_1_SHIFT 16L
+#define MFC_TCLASS_QUOTA_0_SHIFT 24L
+#define MFC_TCLASS_QUOTA_2_MASK (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
+#define MFC_TCLASS_QUOTA_1_MASK (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
+#define MFC_TCLASS_QUOTA_0_MASK (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
+ u8 pad_0x828_0x900[0x900 - 0x828]; /* 0x828 */
+
+ /* Real Mode Support Registers */
+ u64 mfc_rm_boundary; /* 0x900 */
+ u8 pad_0x908_0x938[0x30]; /* 0x908 */
+ u64 smf_dma_signal_sel; /* 0x938 */
+#define mfc_dma1_mask_lsb 41
+#define mfc_dma1_shift (63 - mfc_dma1_mask_lsb)
+#define mfc_dma1_mask (0x3LL << mfc_dma1_shift)
+#define mfc_dma1_bits (0x1LL << mfc_dma1_shift)
+#define mfc_dma2_mask_lsb 43
+#define mfc_dma2_shift (63 - mfc_dma2_mask_lsb)
+#define mfc_dma2_mask (0x3LL << mfc_dma2_shift)
+#define mfc_dma2_bits (0x1LL << mfc_dma2_shift)
+ u8 pad_0x940_0xa38[0xf8]; /* 0x940 */
+ u64 smm_signal_sel; /* 0xa38 */
+#define smm_sig_mask_lsb 12
+#define smm_sig_shift (63 - smm_sig_mask_lsb)
+#define smm_sig_mask (0x3LL << smm_sig_shift)
+#define smm_sig_bus0_bits (0x2LL << smm_sig_shift)
+#define smm_sig_bus2_bits (0x1LL << smm_sig_shift)
+ u8 pad_0xa40_0xc00[0xc00 - 0xa40]; /* 0xa40 */
+
+ /* DMA Command Error Area */
+ u64 mfc_cer_R; /* 0xc00 */
+#define MFC_CER_Q (1 << 31)
+#define MFC_CER_SPU_QUEUE MFC_CER_Q
+ u8 pad_0xc08_0x1000[0x1000 - 0xc08]; /* 0xc08 */
+
+ /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
+ /* DMA Command Error Area */
+ u64 spu_ecc_cntl_RW; /* 0x1000 */
+#define SPU_ECC_CNTL_E (1ull << 0ull)
+#define SPU_ECC_CNTL_ENABLE SPU_ECC_CNTL_E
+#define SPU_ECC_CNTL_DISABLE (~SPU_ECC_CNTL_E & 1L)
+#define SPU_ECC_CNTL_S (1ull << 1ull)
+#define SPU_ECC_STOP_AFTER_ERROR SPU_ECC_CNTL_S
+#define SPU_ECC_CONTINUE_AFTER_ERROR (~SPU_ECC_CNTL_S & 2L)
+#define SPU_ECC_CNTL_B (1ull << 2ull)
+#define SPU_ECC_BACKGROUND_ENABLE SPU_ECC_CNTL_B
+#define SPU_ECC_BACKGROUND_DISABLE (~SPU_ECC_CNTL_B & 4L)
+#define SPU_ECC_CNTL_I_SHIFT 3ull
+#define SPU_ECC_CNTL_I_MASK (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_ALWAYS (~SPU_ECC_CNTL_I & 12L)
+#define SPU_ECC_WRITE_CORRECTABLE (1ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_UNCORRECTABLE (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_CNTL_D (1ull << 5ull)
+#define SPU_ECC_DETECTION_ENABLE SPU_ECC_CNTL_D
+#define SPU_ECC_DETECTION_DISABLE (~SPU_ECC_CNTL_D & 32L)
+ u64 spu_ecc_stat_RW; /* 0x1008 */
+#define SPU_ECC_CORRECTED_ERROR (1ull << 0ul)
+#define SPU_ECC_UNCORRECTED_ERROR (1ull << 1ul)
+#define SPU_ECC_SCRUB_COMPLETE (1ull << 2ul)
+#define SPU_ECC_SCRUB_IN_PROGRESS (1ull << 3ul)
+#define SPU_ECC_INSTRUCTION_ERROR (1ull << 4ul)
+#define SPU_ECC_DATA_ERROR (1ull << 5ul)
+#define SPU_ECC_DMA_ERROR (1ull << 6ul)
+#define SPU_ECC_STATUS_CNT_MASK (256ull << 8)
+ u64 spu_ecc_addr_RW; /* 0x1010 */
+ u64 spu_err_mask_RW; /* 0x1018 */
+#define SPU_ERR_ILLEGAL_INSTR (1ull << 0ul)
+#define SPU_ERR_ILLEGAL_CHANNEL (1ull << 1ul)
+ u8 pad_0x1020_0x1028[0x1028 - 0x1020]; /* 0x1020 */
+
+ /* SPU Debug-Trace Bus (DTB) Selection Registers */
+ u64 spu_trig0_sel; /* 0x1028 */
+ u64 spu_trig1_sel; /* 0x1030 */
+ u64 spu_trig2_sel; /* 0x1038 */
+ u64 spu_trig3_sel; /* 0x1040 */
+ u64 spu_trace_sel; /* 0x1048 */
+#define spu_trace_sel_mask 0x1f1fLL
+#define spu_trace_sel_bus0_bits 0x1000LL
+#define spu_trace_sel_bus2_bits 0x0010LL
+ u64 spu_event0_sel; /* 0x1050 */
+ u64 spu_event1_sel; /* 0x1058 */
+ u64 spu_event2_sel; /* 0x1060 */
+ u64 spu_event3_sel; /* 0x1068 */
+ u64 spu_trace_cntl; /* 0x1070 */
+} __attribute__ ((aligned(0x2000)));
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
new file mode 100644
index 000000000..c33df961c
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * spu_csa.h: Definitions for SPU context save area (CSA).
+ *
+ * (C) Copyright IBM 2005
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ */
+
+#ifndef _SPU_CSA_H_
+#define _SPU_CSA_H_
+#ifdef __KERNEL__
+
+/*
+ * Total number of 128-bit registers.
+ */
+#define NR_SPU_GPRS 128
+#define NR_SPU_SPRS 9
+#define NR_SPU_REGS_PAD 7
+#define NR_SPU_SPILL_REGS 144 /* GPRS + SPRS + PAD */
+#define SIZEOF_SPU_SPILL_REGS (NR_SPU_SPILL_REGS * 16)
+
+#define SPU_SAVE_COMPLETE 0x3FFB
+#define SPU_RESTORE_COMPLETE 0x3FFC
+
+/*
+ * Definitions for various 'stopped' status conditions,
+ * to be recreated during context restore.
+ */
+#define SPU_STOPPED_STATUS_P 1
+#define SPU_STOPPED_STATUS_I 2
+#define SPU_STOPPED_STATUS_H 3
+#define SPU_STOPPED_STATUS_S 4
+#define SPU_STOPPED_STATUS_S_I 5
+#define SPU_STOPPED_STATUS_S_P 6
+#define SPU_STOPPED_STATUS_P_H 7
+#define SPU_STOPPED_STATUS_P_I 8
+#define SPU_STOPPED_STATUS_R 9
+
+/*
+ * Definitions for software decrementer status flag.
+ */
+#define SPU_DECR_STATUS_RUNNING 0x1
+#define SPU_DECR_STATUS_WRAPPED 0x2
+
+#ifndef __ASSEMBLY__
+/**
+ * struct spu_reg128 - generic 128-bit register definition.
+ */
+struct spu_reg128 {
+ u32 slot[4];
+};
+
+/**
+ * struct spu_lscsa - Local Store Context Save Area.
+ * @gprs: Array of saved registers.
+ * @fpcr: Saved floating point status control register.
+ * @decr: Saved decrementer value.
+ * @decr_status: Indicates software decrementer status flags.
+ * @ppu_mb: Saved PPU mailbox data.
+ * @ppuint_mb: Saved PPU interrupting mailbox data.
+ * @tag_mask: Saved tag group mask.
+ * @event_mask: Saved event mask.
+ * @srr0: Saved SRR0.
+ * @stopped_status: Conditions to be recreated by restore.
+ * @ls: Saved contents of Local Storage Area.
+ *
+ * The LSCSA represents state that is primarily saved and
+ * restored by SPU-side code.
+ */
+struct spu_lscsa {
+ struct spu_reg128 gprs[128];
+ struct spu_reg128 fpcr;
+ struct spu_reg128 decr;
+ struct spu_reg128 decr_status;
+ struct spu_reg128 ppu_mb;
+ struct spu_reg128 ppuint_mb;
+ struct spu_reg128 tag_mask;
+ struct spu_reg128 event_mask;
+ struct spu_reg128 srr0;
+ struct spu_reg128 stopped_status;
+
+ /*
+ * 'ls' must be page-aligned on all configurations.
+ * Since we don't want to rely on having spu-gcc
+ * installed to build the kernel, and this structure
+ * is used in the SPU-side code, make it 64k-page
+ * aligned for now.
+ */
+ unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
+};
+
+#ifndef __SPU__
+/*
+ * struct spu_problem_collapsed - condensed problem state area, w/o pads.
+ */
+struct spu_problem_collapsed {
+ u64 spc_mssync_RW;
+ u32 mfc_lsa_W;
+ u32 unused_pad0;
+ u64 mfc_ea_W;
+ union mfc_tag_size_class_cmd mfc_union_W;
+ u32 dma_qstatus_R;
+ u32 dma_querytype_RW;
+ u32 dma_querymask_RW;
+ u32 dma_tagstatus_R;
+ u32 pu_mb_R;
+ u32 spu_mb_W;
+ u32 mb_stat_R;
+ u32 spu_runcntl_RW;
+ u32 spu_status_R;
+ u32 spu_spc_R;
+ u32 spu_npc_RW;
+ u32 signal_notify1;
+ u32 signal_notify2;
+ u32 unused_pad1;
+};
+
+/*
+ * struct spu_priv1_collapsed - condensed privileged 1 area, w/o pads.
+ */
+struct spu_priv1_collapsed {
+ u64 mfc_sr1_RW;
+ u64 mfc_lpid_RW;
+ u64 spu_idr_RW;
+ u64 mfc_vr_RO;
+ u64 spu_vr_RO;
+ u64 int_mask_class0_RW;
+ u64 int_mask_class1_RW;
+ u64 int_mask_class2_RW;
+ u64 int_stat_class0_RW;
+ u64 int_stat_class1_RW;
+ u64 int_stat_class2_RW;
+ u64 int_route_RW;
+ u64 mfc_atomic_flush_RW;
+ u64 resource_allocation_groupID_RW;
+ u64 resource_allocation_enable_RW;
+ u64 mfc_fir_R;
+ u64 mfc_fir_status_or_W;
+ u64 mfc_fir_status_and_W;
+ u64 mfc_fir_mask_R;
+ u64 mfc_fir_mask_or_W;
+ u64 mfc_fir_mask_and_W;
+ u64 mfc_fir_chkstp_enable_RW;
+ u64 smf_sbi_signal_sel;
+ u64 smf_ato_signal_sel;
+ u64 tlb_index_hint_RO;
+ u64 tlb_index_W;
+ u64 tlb_vpn_RW;
+ u64 tlb_rpn_RW;
+ u64 tlb_invalidate_entry_W;
+ u64 tlb_invalidate_all_W;
+ u64 smm_hid;
+ u64 mfc_accr_RW;
+ u64 mfc_dsisr_RW;
+ u64 mfc_dar_RW;
+ u64 rmt_index_RW;
+ u64 rmt_data1_RW;
+ u64 mfc_dsir_R;
+ u64 mfc_lsacr_RW;
+ u64 mfc_lscrr_R;
+ u64 mfc_tclass_id_RW;
+ u64 mfc_rm_boundary;
+ u64 smf_dma_signal_sel;
+ u64 smm_signal_sel;
+ u64 mfc_cer_R;
+ u64 pu_ecc_cntl_RW;
+ u64 pu_ecc_stat_RW;
+ u64 spu_ecc_addr_RW;
+ u64 spu_err_mask_RW;
+ u64 spu_trig0_sel;
+ u64 spu_trig1_sel;
+ u64 spu_trig2_sel;
+ u64 spu_trig3_sel;
+ u64 spu_trace_sel;
+ u64 spu_event0_sel;
+ u64 spu_event1_sel;
+ u64 spu_event2_sel;
+ u64 spu_event3_sel;
+ u64 spu_trace_cntl;
+};
+
+/*
+ * struct spu_priv2_collapsed - condensed privileged 2 area, w/o pads.
+ */
+struct spu_priv2_collapsed {
+ u64 slb_index_W;
+ u64 slb_esid_RW;
+ u64 slb_vsid_RW;
+ u64 slb_invalidate_entry_W;
+ u64 slb_invalidate_all_W;
+ struct mfc_cq_sr spuq[16];
+ struct mfc_cq_sr puq[8];
+ u64 mfc_control_RW;
+ u64 puint_mb_R;
+ u64 spu_privcntl_RW;
+ u64 spu_lslr_RW;
+ u64 spu_chnlcntptr_RW;
+ u64 spu_chnlcnt_RW;
+ u64 spu_chnldata_RW;
+ u64 spu_cfg_RW;
+ u64 spu_tag_status_query_RW;
+ u64 spu_cmd_buf1_RW;
+ u64 spu_cmd_buf2_RW;
+ u64 spu_atomic_status_RW;
+};
+
+/**
+ * struct spu_state
+ * @lscsa: Local Store Context Save Area.
+ * @prob: Collapsed Problem State Area, w/o pads.
+ * @priv1: Collapsed Privileged 1 Area, w/o pads.
+ * @priv2: Collapsed Privileged 2 Area, w/o pads.
+ * @spu_chnlcnt_RW: Array of saved channel counts.
+ * @spu_chnldata_RW: Array of saved channel data.
+ * @suspend_time: Time stamp when decrementer disabled.
+ *
+ * Structure representing the whole of the SPU
+ * context save area (CSA). This struct contains
+ * all of the state necessary to suspend and then
+ * later optionally resume execution of an SPU
+ * context.
+ *
+ * The @lscsa region is by far the largest, and is
+ * allocated separately so that it may either be
+ * pinned or mapped to/from application memory, as
+ * appropriate for the OS environment.
+ */
+struct spu_state {
+ struct spu_lscsa *lscsa;
+ struct spu_problem_collapsed prob;
+ struct spu_priv1_collapsed priv1;
+ struct spu_priv2_collapsed priv2;
+ u64 spu_chnlcnt_RW[32];
+ u64 spu_chnldata_RW[32];
+ u32 spu_mailbox_data[4];
+ u32 pu_mailbox_data[1];
+ u64 class_0_dar, class_0_pending;
+ u64 class_1_dar, class_1_dsisr;
+ unsigned long suspend_time;
+ spinlock_t register_lock;
+};
+
+#endif /* !__SPU__ */
+#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+#endif /* _SPU_CSA_H_ */
diff --git a/arch/powerpc/include/asm/spu_info.h b/arch/powerpc/include/asm/spu_info.h
new file mode 100644
index 000000000..732431034
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_info.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ */
+#ifndef _SPU_INFO_H
+#define _SPU_INFO_H
+
+#include <asm/spu.h>
+#include <uapi/asm/spu_info.h>
+
+#endif
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
new file mode 100644
index 000000000..2167d756e
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Defines an spu hypervisor abstraction layer.
+ *
+ * Copyright 2006 Sony Corp.
+ */
+
+#if !defined(_SPU_PRIV1_H)
+#define _SPU_PRIV1_H
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+
+struct spu;
+struct spu_context;
+
+/* access to priv1 registers */
+
+struct spu_priv1_ops {
+ void (*int_mask_and) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_or) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_set) (struct spu *spu, int class, u64 mask);
+ u64 (*int_mask_get) (struct spu *spu, int class);
+ void (*int_stat_clear) (struct spu *spu, int class, u64 stat);
+ u64 (*int_stat_get) (struct spu *spu, int class);
+ void (*cpu_affinity_set) (struct spu *spu, int cpu);
+ u64 (*mfc_dar_get) (struct spu *spu);
+ u64 (*mfc_dsisr_get) (struct spu *spu);
+ void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
+ void (*mfc_sdr_setup) (struct spu *spu);
+ void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
+ u64 (*mfc_sr1_get) (struct spu *spu);
+ void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
+ u64 (*mfc_tclass_id_get) (struct spu *spu);
+ void (*tlb_invalidate) (struct spu *spu);
+ void (*resource_allocation_groupID_set) (struct spu *spu, u64 id);
+ u64 (*resource_allocation_groupID_get) (struct spu *spu);
+ void (*resource_allocation_enable_set) (struct spu *spu, u64 enable);
+ u64 (*resource_allocation_enable_get) (struct spu *spu);
+};
+
+extern const struct spu_priv1_ops* spu_priv1_ops;
+
+static inline void
+spu_int_mask_and (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_and(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_or (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_or(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_set (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_set(spu, class, mask);
+}
+
+static inline u64
+spu_int_mask_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_mask_get(spu, class);
+}
+
+static inline void
+spu_int_stat_clear (struct spu *spu, int class, u64 stat)
+{
+ spu_priv1_ops->int_stat_clear(spu, class, stat);
+}
+
+static inline u64
+spu_int_stat_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_stat_get (spu, class);
+}
+
+static inline void
+spu_cpu_affinity_set (struct spu *spu, int cpu)
+{
+ spu_priv1_ops->cpu_affinity_set(spu, cpu);
+}
+
+static inline u64
+spu_mfc_dar_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dar_get(spu);
+}
+
+static inline u64
+spu_mfc_dsisr_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dsisr_get(spu);
+}
+
+static inline void
+spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
+{
+ spu_priv1_ops->mfc_dsisr_set(spu, dsisr);
+}
+
+static inline void
+spu_mfc_sdr_setup (struct spu *spu)
+{
+ spu_priv1_ops->mfc_sdr_setup(spu);
+}
+
+static inline void
+spu_mfc_sr1_set (struct spu *spu, u64 sr1)
+{
+ spu_priv1_ops->mfc_sr1_set(spu, sr1);
+}
+
+static inline u64
+spu_mfc_sr1_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_sr1_get(spu);
+}
+
+static inline void
+spu_mfc_tclass_id_set (struct spu *spu, u64 tclass_id)
+{
+ spu_priv1_ops->mfc_tclass_id_set(spu, tclass_id);
+}
+
+static inline u64
+spu_mfc_tclass_id_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_tclass_id_get(spu);
+}
+
+static inline void
+spu_tlb_invalidate (struct spu *spu)
+{
+ spu_priv1_ops->tlb_invalidate(spu);
+}
+
+static inline void
+spu_resource_allocation_groupID_set (struct spu *spu, u64 id)
+{
+ spu_priv1_ops->resource_allocation_groupID_set(spu, id);
+}
+
+static inline u64
+spu_resource_allocation_groupID_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_groupID_get(spu);
+}
+
+static inline void
+spu_resource_allocation_enable_set (struct spu *spu, u64 enable)
+{
+ spu_priv1_ops->resource_allocation_enable_set(spu, enable);
+}
+
+static inline u64
+spu_resource_allocation_enable_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_enable_get(spu);
+}
+
+/* spu management abstraction */
+
+struct spu_management_ops {
+ int (*enumerate_spus)(int (*fn)(void *data));
+ int (*create_spu)(struct spu *spu, void *data);
+ int (*destroy_spu)(struct spu *spu);
+ void (*enable_spu)(struct spu_context *ctx);
+ void (*disable_spu)(struct spu_context *ctx);
+ int (*init_affinity)(void);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+ return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+ return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+ return spu_management_ops->destroy_spu(spu);
+}
+
+static inline int
+spu_init_affinity (void)
+{
+ return spu_management_ops->init_affinity();
+}
+
+static inline void
+spu_enable_spu (struct spu_context *ctx)
+{
+ spu_management_ops->enable_spu(ctx);
+}
+
+static inline void
+spu_disable_spu (struct spu_context *ctx)
+{
+ spu_management_ops->disable_spu(ctx);
+}
+
+/*
+ * The following declarations are put here for convenience
+ * and are only intended to be used by the platform setup code.
+ */
+
+extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_priv1_ops spu_priv1_beat_ops;
+
+extern const struct spu_management_ops spu_management_of_ops;
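+
+/*
+ * Example (a minimal sketch; this mirrors what a native, device-tree
+ * based platform would do in its early setup code):
+ *
+ *	spu_priv1_ops = &spu_priv1_mmio_ops;
+ *	spu_management_ops = &spu_management_of_ops;
+ */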
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
new file mode 100644
index 000000000..50950deed
--- /dev/null
+++ b/arch/powerpc/include/asm/sstep.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ */
+#include <asm/inst.h>
+
+struct pt_regs;
+
+/*
+ * We don't allow single-stepping an mtmsrd that would clear
+ * MSR_RI, since that would make the exception unrecoverable.
+ * Since we need to single-step to proceed from a breakpoint,
+ * we don't allow putting a breakpoint on an mtmsrd instruction.
+ * Similarly we don't allow breakpoints on rfid instructions.
+ * These macros tell us if an instruction is an mtmsrd or rfid.
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
+ */
+#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
+
+enum instruction_type {
+ COMPUTE, /* arith/logical/CR op, etc. */
+ LOAD, /* load and store types need to be contiguous */
+ LOAD_MULTI,
+ LOAD_FP,
+ LOAD_VMX,
+ LOAD_VSX,
+ STORE,
+ STORE_MULTI,
+ STORE_FP,
+ STORE_VMX,
+ STORE_VSX,
+ LARX,
+ STCX,
+ BRANCH,
+ MFSPR,
+ MTSPR,
+ CACHEOP,
+ BARRIER,
+ SYSCALL,
+ SYSCALL_VECTORED_0,
+ MFMSR,
+ MTMSR,
+ RFI,
+ INTERRUPT,
+ UNKNOWN
+};
+
+#define INSTR_TYPE_MASK 0x1f
+
+#define OP_IS_LOAD(type) ((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
+#define OP_IS_STORE(type) ((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
+#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX)
+
+/* Compute flags, ORed in with type */
+#define SETREG 0x20
+#define SETCC 0x40
+#define SETXER 0x80
+
+/* Branch flags, ORed in with type */
+#define SETLK 0x20
+#define BRTAKEN 0x40
+#define DECCTR 0x80
+
+/* Load/store flags, ORed in with type */
+#define SIGNEXT 0x20
+#define UPDATE 0x40 /* matches bit in opcode 31 instructions */
+#define BYTEREV 0x80
+#define FPCONV 0x100
+
+/* Barrier type field, ORed in with type */
+#define BARRIER_MASK 0xe0
+#define BARRIER_SYNC 0x00
+#define BARRIER_ISYNC 0x20
+#define BARRIER_EIEIO 0x40
+#define BARRIER_LWSYNC 0x60
+#define BARRIER_PTESYNC 0x80
+
+/* Cacheop values, ORed in with type */
+#define CACHEOP_MASK 0x700
+#define DCBST 0
+#define DCBF 0x100
+#define DCBTST 0x200
+#define DCBT 0x300
+#define ICBI 0x400
+#define DCBZ 0x500
+
+/* VSX flags values */
+#define VSX_FPCONV 1 /* do floating point SP/DP conversion */
+#define VSX_SPLAT 2 /* store loaded value into all elements */
+#define VSX_LDLEFT 4 /* load VSX register from left */
+#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */
+
+/* Prefixed flag, ORed in with type */
+#define PREFIXED 0x800
+
+/* Size field in type word */
+#define SIZE(n) ((n) << 12)
+#define GETSIZE(w) ((w) >> 12)
+
+#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
+#define GETLENGTH(t) (((t) & PREFIXED) ? 8 : 4)
+
+#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
+
+/* Prefix instruction operands */
+#define GET_PREFIX_RA(i) (((i) >> 16) & 0x1f)
+#define GET_PREFIX_R(i) ((i) & (1ul << 20))
+
+extern s32 patch__exec_instr;
+
+struct instruction_op {
+ int type;
+ int reg;
+ unsigned long val;
+ /* For LOAD/STORE/LARX/STCX */
+ unsigned long ea;
+ int update_reg;
+ /* For MFSPR */
+ int spr;
+ u32 ccval;
+ u32 xerval;
+ u8 element_size; /* for VSX/VMX loads/stores */
+ u8 vsx_flags;
+};
+
+union vsx_reg {
+ u8 b[16];
+ u16 h[8];
+ u32 w[4];
+ unsigned long d[2];
+ float fp[4];
+ double dp[2];
+ __vector128 v;
+};
+
+/*
+ * Decode an instruction, and return information about it in *op
+ * without changing *regs.
+ *
+ * Return value is 1 if the instruction can be emulated just by
+ * updating *regs with the information in *op, -1 if we need the
+ * GPRs but *regs doesn't contain the full register set, or 0
+ * otherwise.
+ */
+extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ ppc_inst_t instr);
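+
+/*
+ * Example (a minimal sketch of a caller that takes the register-only
+ * fast path and falls back to emulate_loadstore(), declared below,
+ * for memory accesses):
+ *
+ *	struct instruction_op op;
+ *	int r = analyse_instr(&op, regs, instr);
+ *
+ *	if (r == 1)
+ *		emulate_update_regs(regs, &op);
+ *	else if (r == 0 && OP_IS_LOAD_STORE(GETTYPE(op.type)))
+ *		r = emulate_loadstore(regs, &op);
+ */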
+
+/*
+ * Emulate an instruction that can be executed just by updating
+ * fields in *regs.
+ */
+void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * arithmetic/logical instructions, loads and stores,
+ * cache operations and barriers.
+ *
+ * Returns 1 if the instruction was emulated successfully,
+ * 0 if it could not be emulated, or -1 for an instruction that
+ * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
+ */
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
+
+/*
+ * Emulate a load or store instruction by reading/writing the
+ * memory of the current process. FP/VMX/VSX registers are assumed
+ * to hold live values if the appropriate enable bit in regs->msr is
+ * set; otherwise this will use the saved values in the thread struct
+ * for user-mode accesses.
+ */
+extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+
+extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+ const void *mem, bool cross_endian);
+extern void emulate_vsx_store(struct instruction_op *op,
+ const union vsx_reg *reg, void *mem,
+ bool cross_endian);
+extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
new file mode 100644
index 000000000..1c8460e23
--- /dev/null
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCC stack protector support.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/reg.h>
+#include <asm/current.h>
+#include <asm/paca.h>
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ canary = get_random_canary();
+ canary ^= mftb();
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+#ifdef CONFIG_PPC64
+ get_paca()->canary = canary;
+#endif
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
new file mode 100644
index 000000000..6149b53b3
--- /dev/null
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Stack trace functions.
+ *
+ * Copyright 2018, Murilo Opsfelder Araujo, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_STACKTRACE_H
+#define _ASM_POWERPC_STACKTRACE_H
+
+void show_user_instructions(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
new file mode 100644
index 000000000..de1018cc5
--- /dev/null
+++ b/arch/powerpc/include/asm/static_call.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STATIC_CALL_H
+#define _ASM_POWERPC_STATIC_CALL_H
+
+#define __PPC_SCT(name, inst) \
+ asm(".pushsection .text, \"ax\" \n" \
+ ".align 5 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ inst " \n" \
+ " lis 12,2f@ha \n" \
+ " lwz 12,2f@l(12) \n" \
+ " mtctr 12 \n" \
+ " bctr \n" \
+ "1: li 3, 0 \n" \
+ " blr \n" \
+ "2: .long 0 \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#define PPC_SCT_RET0 20 /* Offset of label 1 */
+#define PPC_SCT_DATA 28 /* Offset of label 2 */
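+
+/*
+ * Trampoline layout, for reference (each instruction is 4 bytes): the
+ * patched entry instruction sits at offset 0, the lis/lwz/mtctr/bctr
+ * indirect branch occupies offsets 4-16, label 1 ("li 3, 0; blr") starts
+ * at offset 20, and the 32-bit target address at label 2 sits at offset
+ * 28 - hence the PPC_SCT_RET0/PPC_SCT_DATA values above and the "b .+20"
+ * used for the RET0 trampoline below.
+ */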
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) __PPC_SCT(name, "b " #func)
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr")
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) __PPC_SCT(name, "b .+20")
+
+#endif /* _ASM_POWERPC_STATIC_CALL_H */
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
new file mode 100644
index 000000000..2aa0e31e6
--- /dev/null
+++ b/arch/powerpc/include/asm/string.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STRING_H
+#define _ASM_POWERPC_STRING_H
+
+#ifdef __KERNEL__
+
+#ifndef CONFIG_KASAN
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRNCMP
+#define __HAVE_ARCH_MEMCHR
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMSET16
+#endif
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
+
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *, const char *, __kernel_size_t);
+extern char * strcat(char *, const char *);
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern int memcmp(const void *,const void *,__kernel_size_t);
+extern void * memchr(const void *,int,__kernel_size_t);
+void memcpy_flushcache(void *dest, const void *src, size_t size);
+
+void *__memset(void *s, int c, __kernel_size_t count);
+void *__memcpy(void *to, const void *from, __kernel_size_t n);
+void *__memmove(void *to, const void *from, __kernel_size_t n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
+#ifdef CONFIG_PPC64
+#ifndef CONFIG_KASAN
+#define __HAVE_ARCH_MEMSET32
+#define __HAVE_ARCH_MEMSET64
+
+extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
+
+static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
+{
+ return __memset16(p, v, n * 2);
+}
+
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+ return __memset32(p, v, n * 4);
+}
+
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+ return __memset64(p, v, n * 8);
+}
+#endif
+#else
+#ifndef CONFIG_KASAN
+#define __HAVE_ARCH_STRLEN
+#endif
+
+extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
+#endif
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_STRING_H */
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
new file mode 100644
index 000000000..a02bd54b8
--- /dev/null
+++ b/arch/powerpc/include/asm/svm.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 Anshuman Khandual, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_SVM_H
+#define _ASM_POWERPC_SVM_H
+
+#ifdef CONFIG_PPC_SVM
+
+#include <asm/reg.h>
+
+static inline bool is_secure_guest(void)
+{
+ return mfmsr() & MSR_S;
+}
+
+void dtl_cache_ctor(void *addr);
+#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
+
+#else /* CONFIG_PPC_SVM */
+
+static inline bool is_secure_guest(void)
+{
+ return false;
+}
+
+#define get_dtl_cache_ctor() NULL
+
+#endif /* CONFIG_PPC_SVM */
+#endif /* _ASM_POWERPC_SVM_H */
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
new file mode 100644
index 000000000..f4cfdc124
--- /dev/null
+++ b/arch/powerpc/include/asm/swab.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ */
+#ifndef _ASM_POWERPC_SWAB_H
+#define _ASM_POWERPC_SWAB_H
+
+#include <uapi/asm/swab.h>
+
+#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
new file mode 100644
index 000000000..4203b5e0a
--- /dev/null
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ */
+
+#ifndef __ASM_SWIOTLB_H
+#define __ASM_SWIOTLB_H
+
+#include <linux/swiotlb.h>
+
+extern unsigned int ppc_swiotlb_enable;
+extern unsigned int ppc_swiotlb_flags;
+
+#ifdef CONFIG_SWIOTLB
+void swiotlb_detect_4g(void);
+#else
+static inline void swiotlb_detect_4g(void) {}
+#endif
+
+#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
new file mode 100644
index 000000000..aee25e3eb
--- /dev/null
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SWITCH_TO_H
+#define _ASM_POWERPC_SWITCH_TO_H
+
+#include <linux/sched.h>
+#include <asm/reg.h>
+
+struct thread_struct;
+struct task_struct;
+struct pt_regs;
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+
+extern struct task_struct *_switch(struct thread_struct *prev,
+ struct thread_struct *next);
+
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
+
+extern int emulate_altivec(struct pt_regs *);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void restore_math(struct pt_regs *regs);
+#else
+static inline void restore_math(struct pt_regs *regs)
+{
+}
+#endif
+
+void restore_tm_state(struct pt_regs *regs);
+
+extern void flush_all_to_thread(struct task_struct *);
+extern void giveup_all(struct task_struct *);
+
+#ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+extern void save_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+ msr_check_and_clear(MSR_FP);
+}
+#else
+static inline void save_fpu(struct task_struct *t) { }
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
+extern void flush_altivec_to_thread(struct task_struct *);
+extern void giveup_altivec(struct task_struct *);
+extern void save_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
+{
+ msr_check_and_clear(MSR_VEC);
+}
+#else
+static inline void save_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
+static inline void enable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
+#endif
+
+#ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
+extern void flush_vsx_to_thread(struct task_struct *);
+static inline void disable_kernel_vsx(void)
+{
+ msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
+}
+#else
+static inline void enable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
+extern void flush_spe_to_thread(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
+{
+ msr_check_and_clear(MSR_SPE);
+}
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
+#endif
+
+static inline void clear_task_ebb(struct task_struct *t)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* EBB perf events are not inherited, so clear all EBB state. */
+ t->thread.ebbrr = 0;
+ t->thread.ebbhr = 0;
+ t->thread.bescr = 0;
+ t->thread.mmcr2 = 0;
+ t->thread.mmcr0 = 0;
+ t->thread.siar = 0;
+ t->thread.sdar = 0;
+ t->thread.sier = 0;
+ t->thread.used_ebb = 0;
+#endif
+}
+
+void kvmppc_save_user_regs(void);
+void kvmppc_save_current_sprs(void);
+
+extern int set_thread_tidr(struct task_struct *t);
+
+#endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
new file mode 100644
index 000000000..b0b4c6487
--- /dev/null
+++ b/arch/powerpc/include/asm/synch.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+#ifdef __KERNEL__
+
+#include <asm/cputable.h>
+#include <asm/feature-fixups.h>
+#include <asm/ppc-opcode.h>
+
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+ void *fixup_end);
+
+static inline void eieio(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE))
+ __asm__ __volatile__ ("mbar" : : : "memory");
+ else
+ __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+ __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+static inline void ppc_after_tlbiel_barrier(void)
+{
+ asm volatile("ptesync": : :"memory");
+ /*
+ * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
+ * invalidated correctly. If this is not done, the paste can take data
+ * from the physical address that was translated at copy time.
+ *
+ * POWER9 in practice does not need this, because address spaces with
+ * accelerators mapped will use tlbie (which does invalidate the copy)
+ * to invalidate translations. It's not possible to limit POWER10 this
+ * way due to local copy-paste.
+ */
+ asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
+}
+#endif /* __ASSEMBLY__ */
+
+#if defined(__powerpc64__)
+# define LWSYNC lwsync
+#elif defined(CONFIG_PPC_E500)
+# define LWSYNC \
+ START_LWSYNC_SECTION(96); \
+ sync; \
+ MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
+#else
+# define LWSYNC sync
+#endif
+
+#ifdef CONFIG_SMP
+#define __PPC_ACQUIRE_BARRIER \
+ START_LWSYNC_SECTION(97); \
+ isync; \
+ MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
+#else
+#define PPC_ACQUIRE_BARRIER
+#define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
+#endif
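+
+/*
+ * These barriers are spliced into inline-asm sequences, e.g. the
+ * atomic/cmpxchg implementations place PPC_ATOMIC_ENTRY_BARRIER and
+ * PPC_ATOMIC_EXIT_BARRIER around fully-ordered operations, and
+ * PPC_ACQUIRE_BARRIER after the stcx. of acquire variants.
+ */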
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYNCH_H */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
new file mode 100644
index 000000000..3dd36c5e3
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+typedef long (*syscall_fn)(const struct pt_regs *);
+#else
+typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+#endif
+
+/* ftrace syscall tracing requires exporting the sys_call_table */
+extern const syscall_fn sys_call_table[];
+extern const syscall_fn compat_sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ /*
+ * Note that we are returning an int here. That means 0xffffffff, ie.
+ * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+ * This is important for seccomp so that compat tasks can set r0 = -1
+ * to reject the syscall.
+ */
+ if (trap_is_syscall(regs))
+ return regs->gpr[0];
+ else
+ return -1;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->gpr[3] = regs->orig_gpr3;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive error code.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
+}
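+
+/*
+ * For example, a failed open() under the old "sc" ABI returns with the
+ * CR0.SO bit (0x10000000) set and a positive ENOENT in gpr[3], so the
+ * helper above yields -ENOENT; under "scv" the same failure places
+ * -ENOENT in gpr[3] directly and IS_ERR_VALUE() catches it.
+ */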
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->gpr[3];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
+ } else {
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long val, mask = -1UL;
+ unsigned int n = 6;
+
+ if (is_tsk_32bit_task(task))
+ mask = 0xffffffff;
+
+ while (n--) {
+ if (n == 0)
+ val = regs->orig_gpr3;
+ else
+ val = regs->gpr[3 + n];
+
+ args[n] = val & mask;
+ }
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ if (is_tsk_32bit_task(task))
+ return AUDIT_ARCH_PPC;
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return AUDIT_ARCH_PPC64LE;
+ else
+ return AUDIT_ARCH_PPC64;
+}
+#endif /* _ASM_SYSCALL_H */
diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000..67486c67e
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall_wrapper.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - powerpc specific wrappers to syscall definitions
+ *
+ * Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h
+ */
+
+#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H
+#define __ASM_POWERPC_SYSCALL_WRAPPER_H
+
+struct pt_regs;
+
+#define SC_POWERPC_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \
+ ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8])
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ long sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ long sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
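+
+/*
+ * For example, SYSCALL_DEFINE1(close, unsigned int, fd) in generic code
+ * expands through this macro to a sys_close(const struct pt_regs *regs)
+ * wrapper that pulls fd out of regs->gpr[3], casts it, and calls the
+ * local __do_sys_close() body.
+ */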
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ long sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \
+ long sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ long sys_##name(const struct pt_regs *regs); \
+ long __weak sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
new file mode 100644
index 000000000..6d51b007b
--- /dev/null
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_SYSCALLS_H
+#define __ASM_POWERPC_SYSCALLS_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+
+#include <asm/syscall.h>
+#ifdef CONFIG_PPC64
+#include <asm/syscalls_32.h>
+#endif
+#include <asm/unistd.h>
+#include <asm/ucontext.h>
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+long sys_ni_syscall(void);
+#else
+long sys_ni_syscall(const struct pt_regs *regs);
+#endif
+
+struct rtas_args;
+
+/*
+ * long long munging:
+ * The 32-bit ABI passes long longs in an odd/even register pair.
+ * High and low parts are swapped depending on endian mode,
+ * so define a macro (similar to mips linux32) to handle that.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define merge_64(low, high) (((u64)high << 32) | low)
+#else
+#define merge_64(high, low) (((u64)high << 32) | low)
+#endif
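+
+/*
+ * For example, a 32-bit truncate64 entry point that receives the length
+ * in the (len1, len2) register pair reconstructs it as
+ * merge_64(len1, len2); the macro picks the high/low order to match the
+ * endian mode.
+ */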
+
+/*
+ * PowerPC architecture-specific syscalls
+ */
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+
+long sys_rtas(struct rtas_args __user *uargs);
+
+#ifdef CONFIG_PPC64
+long sys_ppc64_personality(unsigned long personality);
+#ifdef CONFIG_COMPAT
+long compat_sys_ppc64_personality(unsigned long personality);
+#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_PPC64 */
+
+long sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx, long ctx_size);
+long sys_mmap(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset);
+long sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long sys_switch_endian(void);
+
+#ifdef CONFIG_PPC32
+long sys_sigreturn(void);
+long sys_debug_setcontext(struct ucontext __user *ctx, int ndbg,
+ struct sig_dbg_op __user *dbg);
+#endif
+
+long sys_rt_sigreturn(void);
+
+long sys_subpage_prot(unsigned long addr,
+ unsigned long len, u32 __user *map);
+
+#ifdef CONFIG_COMPAT
+long compat_sys_swapcontext(struct ucontext32 __user *old_ctx,
+ struct ucontext32 __user *new_ctx,
+ int ctx_size);
+long compat_sys_old_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+long compat_sys_sigreturn(void);
+long compat_sys_rt_sigreturn(void);
+#endif /* CONFIG_COMPAT */
+
+/*
+ * Architecture-specific signatures required by long long munging:
+ * The 32-bit ABI passes long longs in an odd/even register pair.
+ * The following signatures provide a machine long parameter for
+ * each register that will be supplied. The implementation is
+ * responsible for combining parameter pairs.
+ */
+
+#ifdef CONFIG_PPC32
+long sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+long sys_ppc_sync_file_range2(int fd, unsigned int flags,
+ unsigned int offset1,
+ unsigned int offset2,
+ unsigned int nbytes1,
+ unsigned int nbytes2);
+long sys_ppc_fallocate(int fd, int mode, u32 offset1, u32 offset2,
+ u32 len1, u32 len2);
+#endif
+#ifdef CONFIG_COMPAT
+long compat_sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long compat_sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long compat_sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+long compat_sys_ppc_sync_file_range2(int fd, unsigned int flags,
+ unsigned int offset1,
+ unsigned int offset2,
+ unsigned int nbytes1,
+ unsigned int nbytes2);
+#endif /* CONFIG_COMPAT */
+
+#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT)
+long sys_ppc_fadvise64_64(int fd, int advice,
+ u32 offset_high, u32 offset_low,
+ u32 len_high, u32 len_low);
+#endif
+
+#else
+
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) \
+ long entry(const struct pt_regs *regs);
+
+#ifdef CONFIG_PPC64
+#include <asm/syscall_table_64.h>
+#else
+#include <asm/syscall_table_32.h>
+#endif /* CONFIG_PPC64 */
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/syscalls_32.h b/arch/powerpc/include/asm/syscalls_32.h
new file mode 100644
index 000000000..749255568
--- /dev/null
+++ b/arch/powerpc/include/asm/syscalls_32.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_SYSCALLS_32_H
+#define _ASM_POWERPC_SYSCALLS_32_H
+
+#include <linux/compat.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+/*
+ * Data types and macros for providing 32-bit PowerPC support.
+ */
+
+/* These are here to support 32-bit syscalls on a 64-bit kernel. */
+
+struct pt_regs32 {
+ unsigned int gpr[32];
+ unsigned int nip;
+ unsigned int msr;
+ unsigned int orig_gpr3; /* Used for restarting system calls */
+ unsigned int ctr;
+ unsigned int link;
+ unsigned int xer;
+ unsigned int ccr;
+ unsigned int mq; /* 601 only (not used at present) */
+ unsigned int trap; /* Reason for being here */
+ unsigned int dar; /* Fault registers */
+ unsigned int dsisr;
+ unsigned int result; /* Result of a system call */
+};
+
+struct sigcontext32 {
+ unsigned int _unused[4];
+ int signal;
+ compat_uptr_t handler;
+ unsigned int oldmask;
+ compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
+};
+
+struct mcontext32 {
+ elf_gregset_t32 mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned int mc_pad[2];
+ elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
+ elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16)));
+};
+
+struct ucontext32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ compat_stack_t uc_stack;
+ int uc_pad[7];
+ compat_uptr_t uc_regs; /* points to uc_mcontext field */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext32 uc_mcontext;
+};
+
+#endif // _ASM_POWERPC_SYSCALLS_32_H
diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include/asm/task_size_32.h
new file mode 100644
index 000000000..de7290ee7
--- /dev/null
+++ b/arch/powerpc/include/asm/task_size_32.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TASK_SIZE_32_H
+#define _ASM_POWERPC_TASK_SIZE_32_H
+
+#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
+#error User TASK_SIZE overlaps with KERNEL_START address
+#endif
+
+#define TASK_SIZE (CONFIG_TASK_SIZE)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm space during
+ * mmaps.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
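+
+/*
+ * For example, with the common CONFIG_TASK_SIZE of 0xc0000000 (3GB) this
+ * puts TASK_UNMAPPED_BASE at 0x48000000, i.e. the mmap search starts
+ * 1.125GB into the address space.
+ */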
+
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+#endif /* _ASM_POWERPC_TASK_SIZE_32_H */
diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h
new file mode 100644
index 000000000..5a709951c
--- /dev/null
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TASK_SIZE_64_H
+#define _ASM_POWERPC_TASK_SIZE_64_H
+
+/*
+ * The 64-bit user address space can have multiple limits.
+ * The currently supported values are:
+ */
+#define TASK_SIZE_64TB (0x0000400000000000UL)
+#define TASK_SIZE_128TB (0x0000800000000000UL)
+#define TASK_SIZE_512TB (0x0002000000000000UL)
+#define TASK_SIZE_1PB (0x0004000000000000UL)
+#define TASK_SIZE_2PB (0x0008000000000000UL)
+
+/*
+ * With 52 bits in the address we can support up to 4PB of range.
+ */
+#define TASK_SIZE_4PB (0x0010000000000000UL)
+
+/*
+ * For now, 512TB is only supported with book3s and a 64K Linux page size.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+/*
+ * Max value currently used:
+ */
+#define TASK_SIZE_USER64 TASK_SIZE_4PB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
+#define TASK_CONTEXT_SIZE TASK_SIZE_512TB
+#else
+#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
+
+/*
+ * We don't need to allocate extended context ids for 4K page size, because we
+ * limit the max effective address on this config to 64TB.
+ */
+#define TASK_CONTEXT_SIZE TASK_SIZE_64TB
+#endif
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
+
+#define TASK_SIZE (is_32bit_task() ? TASK_SIZE_USER32 : TASK_SIZE_USER64)
+
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
+
+/*
+ * This decides where the kernel will search for a free chunk of vm space during
+ * mmaps.
+ */
+#define TASK_UNMAPPED_BASE \
+ ((is_32bit_task()) ? TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)
+
+/*
+ * Initial task size value for user applications. For book3s 64 we start
+ * with 128TB and conditionally enable up to 512TB.
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+#define DEFAULT_MAP_WINDOW \
+ ((is_32bit_task()) ? TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
+#else
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#endif
+
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
+#define STACK_TOP_USER32 TASK_SIZE_USER32
+#define STACK_TOP_MAX TASK_SIZE_USER64
+#define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64)
+
+#define arch_get_mmap_base(addr, base) \
+ (((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
+
+#define arch_get_mmap_end(addr, len, flags) \
+ (((addr) > DEFAULT_MAP_WINDOW) || \
+ (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? TASK_SIZE : \
+ DEFAULT_MAP_WINDOW)
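+
+/*
+ * In other words, with 64K pages on book3s the mmap search is normally
+ * bounded by the 128TB DEFAULT_MAP_WINDOW, but an explicit hint (or a
+ * MAP_FIXED request) above 128TB widens it to the full 4PB TASK_SIZE.
+ */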
+
+#endif /* _ASM_POWERPC_TASK_SIZE_64_H */
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
new file mode 100644
index 000000000..0c34d2756
--- /dev/null
+++ b/arch/powerpc/include/asm/tce.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_TCE_H
+#define _ASM_POWERPC_TCE_H
+#ifdef __KERNEL__
+
+#include <asm/iommu.h>
+
+/*
+ * TCEs come in two formats, one for the virtual bus and a different
+ * format for PCI. PCI TCEs can have hardware- or software-maintained
+ * coherency.
+ */
+#define TCE_VB 0
+#define TCE_PCI 1
+
+#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
+#define TCE_VALID 0x800 /* TCE valid */
+#define TCE_ALLIO 0x400 /* TCE valid for all lpars */
+#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
+#define TCE_PCI_READ 0x1 /* read from PCI allowed */
+#define TCE_VB_WRITE 0x1 /* write from VB allowed */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TCE_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
new file mode 100644
index 000000000..af58f1ed3
--- /dev/null
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* thread_info.h: PowerPC low-level thread information
+ * adapted from the i386 version by Paul Mackerras
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_POWERPC_THREAD_INFO_H
+#define _ASM_POWERPC_THREAD_INFO_H
+
+#include <asm/asm-const.h>
+#include <asm/page.h>
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_KASAN
+#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
+#else
+#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
+#endif
+
+#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
+#define THREAD_SHIFT PAGE_SHIFT
+#else
+#define THREAD_SHIFT MIN_THREAD_SHIFT
+#endif
+
+#define THREAD_SIZE (1 << THREAD_SHIFT)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN_SHIFT (THREAD_SHIFT + 1)
+#else
+#define THREAD_ALIGN_SHIFT THREAD_SHIFT
+#endif
+
+#define THREAD_ALIGN (1 << THREAD_ALIGN_SHIFT)
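+
+/*
+ * For example, with THREAD_SHIFT = 14 (16K stacks) a VMAP'd stack starts
+ * on a 32K boundary: any sp inside the valid 16K region has bit 14 clear,
+ * while an overflow into the 16K immediately below it sets bit 14.
+ */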
+
+#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+#include <asm/processor.h>
+#include <asm/accounting.h>
+
+#define SLB_PRELOAD_NR 16U
+/*
+ * low level task data.
+ */
+struct thread_info {
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
+ unsigned long local_flags; /* private flags for thread */
+#ifdef CONFIG_LIVEPATCH_64
+ unsigned long *livepatch_sp;
+#endif
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
+ struct cpu_accounting_data accounting;
+#endif
+ unsigned char slb_preload_nr;
+ unsigned char slb_preload_tail;
+ u32 slb_preload_esid[SLB_PRELOAD_NR];
+
+ /* low level flags - has atomic operations done on it */
+ unsigned long flags ____cacheline_aligned_in_smp;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .flags = 0, \
+}
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+/* how to get the thread information struct from C */
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+void arch_setup_new_exec(void);
+#define arch_setup_new_exec arch_setup_new_exec
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * thread information flag bit numbers
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */
+#define TIF_SYSCALL_EMU 4 /* syscall emulation active */
+#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+#define TIF_PATCH_PENDING 6 /* pending live patching update */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+#define TIF_SINGLESTEP 8 /* singlestepping active */
+#define TIF_SECCOMP 10 /* secure computing */
+#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR 12 /* Force successful syscall return */
+#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
+#define TIF_UPROBE 14 /* breakpointed or single-stepping */
+#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
+ for stack store? */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+#if defined(CONFIG_PPC64)
+#define TIF_ELF2ABI 18 /* function descriptors must die! */
+#endif
+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_32BIT 20 /* 32 bit binary */
+
+/* as above, but as bit values */
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_32BIT (1<<TIF_32BIT)
+#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
+#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
+#define _TIF_NOERROR (1<<TIF_NOERROR)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
+#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_SYSCALL_EMU)
+
+#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ _TIF_NOTIFY_SIGNAL)
+#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
+#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
+#define TLF_LAZY_MMU 3 /* tlb_batch is active */
+#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
+
+#define _TLF_NAPPING (1 << TLF_NAPPING)
+#define _TLF_SLEEPING (1 << TLF_SLEEPING)
+#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
+#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
+
+#ifndef __ASSEMBLY__
+
+static inline void clear_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->local_flags &= ~flags;
+}
+
+static inline bool test_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ return (ti->local_flags & flags) != 0;
+}
+
+#ifdef CONFIG_COMPAT
+#define is_32bit_task() (test_thread_flag(TIF_32BIT))
+#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
+#else
+#define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
+#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
+#endif
+
+#if defined(CONFIG_PPC64)
+#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
+#else
+#define is_elf2_task() (0)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_THREAD_INFO_H */
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
new file mode 100644
index 000000000..9f50766c4
--- /dev/null
+++ b/arch/powerpc/include/asm/time.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ */
+
+#ifndef __POWERPC_TIME_H
+#define __POWERPC_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/vdso/timebase.h>
+
+/* time.c */
+extern u64 decrementer_max;
+
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
+
+extern void generic_calibrate_decr(void);
+
+/* Some sane defaults: 125 MHz timebase, 1 GHz processor */
+extern unsigned long ppc_proc_freq;
+#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
+extern unsigned long ppc_tb_freq;
+#define DEFAULT_TB_FREQ 125000000UL
+
+extern bool tb_invalid;
+
+struct div_result {
+ u64 result_high;
+ u64 result_low;
+};
+
+static inline u64 get_vtb(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mfspr(SPRN_VTB);
+
+ return 0;
+}
+
+/* Accessor functions for the decrementer register.
+ * The 4xx doesn't even have a decrementer. I tried to use the
+ * generic timer interrupt code, which seems OK, with the 4xx PIT
+ * in auto-reload mode. The problem is that the PIT stops counting when
+ * it hits zero. If it wrapped, we could use it just like a decrementer.
+ */
+static inline u64 get_dec(void)
+{
+ if (IS_ENABLED(CONFIG_40x))
+ return mfspr(SPRN_PIT);
+
+ return mfspr(SPRN_DEC);
+}
+
+/*
+ * Note: Book E and 4xx processors differ from other PowerPC processors
+ * in when the decrementer generates its interrupt: on the 1 to 0
+ * transition for Book E/4xx, but on the 0 to -1 transition for others.
+ */
+static inline void set_dec(u64 val)
+{
+ if (IS_ENABLED(CONFIG_40x))
+ mtspr(SPRN_PIT, (u32)val);
+ else if (IS_ENABLED(CONFIG_BOOKE))
+ mtspr(SPRN_DEC, val);
+ else
+ mtspr(SPRN_DEC, val - 1);
+}
+
+static inline unsigned long tb_ticks_since(unsigned long tstamp)
+{
+ return mftb() - tstamp;
+}
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+#ifdef CONFIG_PPC64
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#else
+extern u64 mulhdu(u64, u64);
+#endif
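+
+/*
+ * mulhwu()/mulhdu() return the high half of an unsigned 32x32 (resp.
+ * 64x64) multiply, e.g. mulhwu(0x80000000, 4) == 2; this is the building
+ * block for scaled conversions such as tb_to_ns().
+ */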
+
+extern void div128_by_32(u64 dividend_high, u64 dividend_low,
+ unsigned divisor, struct div_result *dr);
+
+extern void secondary_cpu_time_init(void);
+extern void __init time_init(void);
+
+DECLARE_PER_CPU(u64, decrementers_next_tb);
+
+static inline u64 timer_get_next_tb(void)
+{
+ return __this_cpu_read(decrementers_next_tb);
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void timer_rearm_host_dec(u64 now);
+#endif
+
+/* Convert timebase ticks to nanoseconds */
+unsigned long long tb_to_ns(unsigned long long tb_ticks);
+
+void timer_broadcast_interrupt(void);
+
+/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */
+void pseries_accumulate_stolen_time(void);
+u64 pseries_calculate_stolen_time(u64 stop_tb);
+
+#endif /* __KERNEL__ */
+#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
new file mode 100644
index 000000000..14b4489de
--- /dev/null
+++ b/arch/powerpc/include/asm/timex.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TIMEX_H
+#define _ASM_POWERPC_TIMEX_H
+
+#ifdef __KERNEL__
+
+/*
+ * PowerPC architecture timex specifications
+ */
+
+#include <asm/cputable.h>
+#include <asm/vdso/timebase.h>
+
+#define CLOCK_TICK_RATE 1024000 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return mftb();
+}
+#define get_cycles get_cycles
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TIMEX_H */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
new file mode 100644
index 000000000..b3de6102a
--- /dev/null
+++ b/arch/powerpc/include/asm/tlb.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TLB shootdown specifics for powerpc
+ *
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ */
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
+#ifdef __KERNEL__
+
+#ifndef __powerpc64__
+#include <linux/pgtable.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#endif
+
+#include <linux/pagemap.h>
+
+#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
+
+#define tlb_flush tlb_flush
+extern void tlb_flush(struct mmu_gather *tlb);
+/*
+ * book3s:
+ * Hash does not use the Linux page tables, so we can avoid
+ * the TLB invalidate for page-table freeing; Radix, on the other hand,
+ * does use the page tables and needs the TLBI.
+ *
+ * nohash:
+ * We still do TLB invalidate in the __pte_free_tlb routine before we
+ * add the page table pages to mmu gather table batch.
+ */
+#define tlb_needs_table_invalidate() radix_enabled()
+
+/* Get the generic bits... */
+#include <asm-generic/tlb.h>
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+#ifdef CONFIG_PPC_BOOK3S_32
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(tlb->mm, ptep, address);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+ return cpumask_subset(mm_cpumask(mm),
+ topology_sibling_cpumask(smp_processor_id()));
+}
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ if (atomic_read(&mm->context.active_cpus) > 1)
+ return false;
+ return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+#else /* CONFIG_PPC_BOOK3S_64 */
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+#endif /* !CONFIG_PPC_BOOK3S_64 */
+
+#else /* CONFIG_SMP */
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+ return 1;
+}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
+#endif
+
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+ return radix_enabled();
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
new file mode 100644
index 000000000..61fba43bf
--- /dev/null
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/tlbflush.h>
+#else
+#include <asm/nohash/tlbflush.h>
+#endif /* !CONFIG_PPC_BOOK3S */
+
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
new file mode 100644
index 000000000..e94f6db5e
--- /dev/null
+++ b/arch/powerpc/include/asm/tm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#include <uapi/asm/tm.h>
+
+#ifndef __ASSEMBLY__
+
+extern void tm_reclaim(struct thread_struct *thread,
+ uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
+extern void tm_recheckpoint(struct thread_struct *thread);
+extern void tm_save_sprs(struct thread_struct *thread);
+extern void tm_restore_sprs(struct thread_struct *thread);
+
+extern bool tm_suspend_disabled;
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
new file mode 100644
index 000000000..8a4d4f4d9
--- /dev/null
+++ b/arch/powerpc/include/asm/topology.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TOPOLOGY_H
+#define _ASM_POWERPC_TOPOLOGY_H
+#ifdef __KERNEL__
+
+struct device;
+struct device_node;
+struct drmem_lmb;
+
+#ifdef CONFIG_NUMA
+
+/*
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
+ */
+#define RECLAIM_DISTANCE 10
+
+#include <asm/mmzone.h>
+
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ node_to_cpumask_map[node])
+
+struct pci_bus;
+#ifdef CONFIG_PCI
+extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return -1;
+}
+#endif
+
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
+int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
+extern int __node_distance(int, int);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern void __init dump_numa_cpu_topology(void);
+
+extern int sysfs_add_device_to_node(struct device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+ numa_cpu_lookup_table[cpu] = node;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+ return (nid < 0) ? 0 : nid;
+}
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+void update_numa_distance(struct device_node *node);
+
+extern void map_cpu_to_node(int cpu, int node);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void unmap_cpu_from_node(unsigned long cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#else
+
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
+static inline void dump_numa_cpu_topology(void) {}
+
+static inline int sysfs_add_device_to_node(struct device *dev, int nid)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_device_from_node(struct device *dev,
+ int nid)
+{
+}
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
+static inline int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ return 0;
+}
+
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
+{
+ return first_online_node;
+}
+
+static inline void update_numa_distance(struct device_node *node) {}
+
+#ifdef CONFIG_SMP
+static inline void map_cpu_to_node(int cpu, int node) {}
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void unmap_cpu_from_node(unsigned long cpu) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_NUMA */
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+void find_and_update_cpu_nid(int cpu);
+extern int cpu_to_coregroup_id(int cpu);
+#else
+static inline void find_and_update_cpu_nid(int cpu) {}
+static inline int cpu_to_coregroup_id(int cpu)
+{
+#ifdef CONFIG_SMP
+ return cpu_to_core_id(cpu);
+#else
+ return 0;
+#endif
+}
+
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
+#include <asm-generic/topology.h>
+
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/smp.h>
+
+#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
+
+#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+#define topology_core_id(cpu) (cpu_to_core_id(cpu))
+
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
new file mode 100644
index 000000000..08cd60cd7
--- /dev/null
+++ b/arch/powerpc/include/asm/trace.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM powerpc
+
+#if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWERPC_H
+
+#include <linux/tracepoint.h>
+
+struct pt_regs;
+
+DECLARE_EVENT_CLASS(ppc64_interrupt_class,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs),
+
+ TP_STRUCT__entry(
+ __field(struct pt_regs *, regs)
+ ),
+
+ TP_fast_assign(
+ __entry->regs = regs;
+ ),
+
+ TP_printk("pt_regs=%p", __entry->regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, irq_entry,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, irq_exit,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_entry,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+#ifdef CONFIG_PPC_DOORBELL
+DEFINE_EVENT(ppc64_interrupt_class, doorbell_entry,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, doorbell_exit,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+extern int hcall_tracepoint_regfunc(void);
+extern void hcall_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN_COND(hcall_entry,
+
+ TP_PROTO(unsigned long opcode, unsigned long *args),
+
+ TP_ARGS(opcode, args),
+
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ ),
+
+ TP_printk("opcode=%lu", __entry->opcode),
+
+ hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN_COND(hcall_exit,
+
+ TP_PROTO(unsigned long opcode, long retval, unsigned long *retbuf),
+
+ TP_ARGS(opcode, retval, retbuf),
+
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ __field(long, retval)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->retval = retval;
+ ),
+
+ TP_printk("opcode=%lu retval=%ld", __entry->opcode, __entry->retval),
+
+ hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+extern int opal_tracepoint_regfunc(void);
+extern void opal_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN(opal_entry,
+
+ TP_PROTO(unsigned long opcode, unsigned long *args),
+
+ TP_ARGS(opcode, args),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ ),
+
+ TP_printk("opcode=%lu", __entry->opcode),
+
+ opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN(opal_exit,
+
+ TP_PROTO(unsigned long opcode, unsigned long retval),
+
+ TP_ARGS(opcode, retval),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ __field(unsigned long, retval)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->retval = retval;
+ ),
+
+ TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval),
+
+ opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+#endif
+
+TRACE_EVENT(hash_fault,
+
+ TP_PROTO(unsigned long addr, unsigned long access, unsigned long trap),
+ TP_ARGS(addr, access, trap),
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, access)
+ __field(unsigned long, trap)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->access = access;
+ __entry->trap = trap;
+ ),
+
+ TP_printk("hash fault with addr 0x%lx and access = 0x%lx trap = 0x%lx",
+ __entry->addr, __entry->access, __entry->trap)
+);
+
+
+TRACE_EVENT(tlbie,
+
+ TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
+ unsigned long rs, unsigned long ric, unsigned long prs,
+ unsigned long r),
+ TP_ARGS(lpid, local, rb, rs, ric, prs, r),
+ TP_STRUCT__entry(
+ __field(unsigned long, lpid)
+ __field(unsigned long, local)
+ __field(unsigned long, rb)
+ __field(unsigned long, rs)
+ __field(unsigned long, ric)
+ __field(unsigned long, prs)
+ __field(unsigned long, r)
+ ),
+
+ TP_fast_assign(
+ __entry->lpid = lpid;
+ __entry->local = local;
+ __entry->rb = rb;
+ __entry->rs = rs;
+ __entry->ric = ric;
+ __entry->prs = prs;
+ __entry->r = r;
+ ),
+
+ TP_printk("lpid=%ld, local=%ld, rb=0x%lx, rs=0x%lx, ric=0x%lx, "
+ "prs=0x%lx, r=0x%lx", __entry->lpid, __entry->local,
+ __entry->rb, __entry->rs, __entry->ric, __entry->prs,
+ __entry->r)
+);
+
+TRACE_EVENT(tlbia,
+
+ TP_PROTO(unsigned long id),
+ TP_ARGS(id),
+ TP_STRUCT__entry(
+ __field(unsigned long, id)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ ),
+
+ TP_printk("ctx.id=0x%lx", __entry->id)
+);
+
+#endif /* _TRACE_POWERPC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 000000000..ef70c2f79
--- /dev/null
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif /* _ASM_PPC_TRACE_CLOCK_H */
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
new file mode 100644
index 000000000..8a2b6427d
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Common routines and memory layout for the Tundra TSI108 (Grendel) host
+ * bridge / memory controller.
+ *
+ * Author: Jacob Pan (jacob.pan@freescale.com)
+ * Alex Bounine (alexandreb@tundra.com)
+ *
+ * Copyright 2004-2006 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __PPC_KERNEL_TSI108_H
+#define __PPC_KERNEL_TSI108_H
+
+#include <asm/pci-bridge.h>
+
+/* Size of entire register space */
+#define TSI108_REG_SIZE (0x10000)
+
+/* Sizes of register spaces for individual blocks */
+#define TSI108_HLP_SIZE 0x1000
+#define TSI108_PCI_SIZE 0x1000
+#define TSI108_CLK_SIZE 0x1000
+#define TSI108_PB_SIZE 0x1000
+#define TSI108_SD_SIZE 0x1000
+#define TSI108_DMA_SIZE 0x1000
+#define TSI108_ETH_SIZE 0x1000
+#define TSI108_I2C_SIZE 0x400
+#define TSI108_MPIC_SIZE 0x400
+#define TSI108_UART0_SIZE 0x200
+#define TSI108_GPIO_SIZE 0x200
+#define TSI108_UART1_SIZE 0x200
+
+/* Offsets within Tsi108(A) CSR space for individual blocks */
+#define TSI108_HLP_OFFSET 0x0000
+#define TSI108_PCI_OFFSET 0x1000
+#define TSI108_CLK_OFFSET 0x2000
+#define TSI108_PB_OFFSET 0x3000
+#define TSI108_SD_OFFSET 0x4000
+#define TSI108_DMA_OFFSET 0x5000
+#define TSI108_ETH_OFFSET 0x6000
+#define TSI108_I2C_OFFSET 0x7000
+#define TSI108_MPIC_OFFSET 0x7400
+#define TSI108_UART0_OFFSET 0x7800
+#define TSI108_GPIO_OFFSET 0x7A00
+#define TSI108_UART1_OFFSET 0x7C00
+
+/* Tsi108 registers used by common code components */
+#define TSI108_PCI_CSR (0x004)
+#define TSI108_PCI_IRP_CFG_CTL (0x180)
+#define TSI108_PCI_IRP_STAT (0x184)
+#define TSI108_PCI_IRP_ENABLE (0x188)
+#define TSI108_PCI_IRP_INTAD (0x18C)
+
+#define TSI108_PCI_IRP_STAT_P_INT (0x00400000)
+#define TSI108_PCI_IRP_ENABLE_P_INT (0x00400000)
+
+#define TSI108_CG_PWRUP_STATUS (0x234)
+
+#define TSI108_PB_ISR (0x00C)
+#define TSI108_PB_ERRCS (0x404)
+#define TSI108_PB_AERR (0x408)
+
+#define TSI108_PB_ERRCS_ES (1 << 1)
+#define TSI108_PB_ISR_PBS_RD_ERR (1 << 8)
+
+#define TSI108_PCI_CFG_SIZE (0x01000000)
+
+/*
+ * PHY Configuration Options
+ *
+ * Specify "bcm54xx" in the compatible property of your device tree phy
+ * nodes if your board uses the Broadcom PHYs
+ */
+#define TSI108_PHY_MV88E 0 /* Marvell 88Exxxx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
+
+/* Global variables */
+
+extern u32 tsi108_pci_cfg_base;
+/* Exported functions */
+
+extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val);
+extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 * val);
+extern void tsi108_clear_pci_error(u32 pci_cfg_base);
+
+extern phys_addr_t get_csrbase(void);
+
+typedef struct {
+ u32 regs; /* hw registers base address */
+ u32 phyregs; /* phy registers base address */
+ u16 phy; /* phy address */
+ u16 irq_num; /* irq number */
+ u8 mac_addr[6]; /* phy mac address */
+ u16 phy_type; /* type of phy on board */
+} hw_info;
+
+extern u32 get_vir_csrbase(void);
+extern u32 tsi108_csr_vir_base;
+
+static inline u32 tsi108_read_reg(u32 reg_offset)
+{
+ return in_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset));
+}
+
+static inline void tsi108_write_reg(u32 reg_offset, u32 val)
+{
+ out_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset), val);
+}
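+
+/*
+ * Illustrative use (assuming tsi108_csr_vir_base has already been set up):
+ * board code can poll the processor-bus error status with
+ * tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS) and test the
+ * TSI108_PB_ERRCS_ES bit.
+ */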
+
+#endif /* __PPC_KERNEL_TSI108_H */
diff --git a/arch/powerpc/include/asm/tsi108_irq.h b/arch/powerpc/include/asm/tsi108_irq.h
new file mode 100644
index 000000000..df602ca4c
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_irq.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Alex Bounine, <alexandreb at tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ */
+
+/*
+ * definitions for interrupt controller initialization and external interrupt
+ * demultiplexing on TSI108EMU/SVB boards.
+ */
+
+#ifndef _ASM_POWERPC_TSI108_IRQ_H
+#define _ASM_POWERPC_TSI108_IRQ_H
+
+/*
+ * Tsi108 interrupts
+ */
+#ifndef TSI108_IRQ_REG_BASE
+#define TSI108_IRQ_REG_BASE 0
+#endif
+
+#define TSI108_IRQ(x) (TSI108_IRQ_REG_BASE + (x))
+
+#define TSI108_MAX_VECTORS (36 + 4) /* 36 sources + PCI INT demux */
+#define MAX_TASK_PRIO 0xF
+
+#define TSI108_IRQ_SPURIOUS (TSI108_MAX_VECTORS)
+
+#define DEFAULT_PRIO_LVL 10 /* initial priority level */
+
+/* Interrupt vector assignment for external and internal
+ * request sources. */
+
+/* EXTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_EXT_INT0 TSI108_IRQ(0) /* External Source at INT[0] */
+#define IRQ_TSI108_EXT_INT1 TSI108_IRQ(1) /* External Source at INT[1] */
+#define IRQ_TSI108_EXT_INT2 TSI108_IRQ(2) /* External Source at INT[2] */
+#define IRQ_TSI108_EXT_INT3 TSI108_IRQ(3) /* External Source at INT[3] */
+
+/* INTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_RESERVED0 TSI108_IRQ(4) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED1 TSI108_IRQ(5) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED2 TSI108_IRQ(6) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED3 TSI108_IRQ(7) /* Reserved IRQ */
+#define IRQ_TSI108_DMA0 TSI108_IRQ(8) /* DMA0 */
+#define IRQ_TSI108_DMA1 TSI108_IRQ(9) /* DMA1 */
+#define IRQ_TSI108_DMA2 TSI108_IRQ(10) /* DMA2 */
+#define IRQ_TSI108_DMA3 TSI108_IRQ(11) /* DMA3 */
+#define IRQ_TSI108_UART0 TSI108_IRQ(12) /* UART0 */
+#define IRQ_TSI108_UART1 TSI108_IRQ(13) /* UART1 */
+#define IRQ_TSI108_I2C TSI108_IRQ(14) /* I2C */
+#define IRQ_TSI108_GPIO TSI108_IRQ(15) /* GPIO */
+#define IRQ_TSI108_GIGE0 TSI108_IRQ(16) /* GIGE0 */
+#define IRQ_TSI108_GIGE1 TSI108_IRQ(17) /* GIGE1 */
+#define IRQ_TSI108_RESERVED4 TSI108_IRQ(18) /* Reserved IRQ */
+#define IRQ_TSI108_HLP TSI108_IRQ(19) /* HLP */
+#define IRQ_TSI108_SDRAM TSI108_IRQ(20) /* SDC */
+#define IRQ_TSI108_PROC_IF TSI108_IRQ(21) /* Processor IF */
+#define IRQ_TSI108_RESERVED5 TSI108_IRQ(22) /* Reserved IRQ */
+#define IRQ_TSI108_PCI TSI108_IRQ(23) /* PCI/X block */
+
+#define IRQ_TSI108_MBOX0 TSI108_IRQ(24) /* Mailbox 0 register */
+#define IRQ_TSI108_MBOX1 TSI108_IRQ(25) /* Mailbox 1 register */
+#define IRQ_TSI108_MBOX2 TSI108_IRQ(26) /* Mailbox 2 register */
+#define IRQ_TSI108_MBOX3 TSI108_IRQ(27) /* Mailbox 3 register */
+
+#define IRQ_TSI108_DBELL0 TSI108_IRQ(28) /* Doorbell 0 */
+#define IRQ_TSI108_DBELL1 TSI108_IRQ(29) /* Doorbell 1 */
+#define IRQ_TSI108_DBELL2 TSI108_IRQ(30) /* Doorbell 2 */
+#define IRQ_TSI108_DBELL3 TSI108_IRQ(31) /* Doorbell 3 */
+
+#define IRQ_TSI108_TIMER0 TSI108_IRQ(32) /* Global Timer 0 */
+#define IRQ_TSI108_TIMER1 TSI108_IRQ(33) /* Global Timer 1 */
+#define IRQ_TSI108_TIMER2 TSI108_IRQ(34) /* Global Timer 2 */
+#define IRQ_TSI108_TIMER3 TSI108_IRQ(35) /* Global Timer 3 */
+
+/*
+ * PCI bus INTA# - INTD# lines demultiplexor
+ */
+#define IRQ_PCI_INTAD_BASE TSI108_IRQ(36)
+#define IRQ_PCI_INTA (IRQ_PCI_INTAD_BASE + 0)
+#define IRQ_PCI_INTB (IRQ_PCI_INTAD_BASE + 1)
+#define IRQ_PCI_INTC (IRQ_PCI_INTAD_BASE + 2)
+#define IRQ_PCI_INTD (IRQ_PCI_INTAD_BASE + 3)
+#define NUM_PCI_IRQS (4)
+
+/* number of entries in vector dispatch table */
+#define IRQ_TSI108_TAB_SIZE (TSI108_MAX_VECTORS + 1)
+
+/* Mapping of MPIC outputs to processors' interrupt pins */
+
+#define IDIR_INT_OUT0 0x1
+#define IDIR_INT_OUT1 0x2
+#define IDIR_INT_OUT2 0x4
+#define IDIR_INT_OUT3 0x8
+
+/*---------------------------------------------------------------
+ * IRQ line configuration parameters */
+
+/* Interrupt delivery modes */
+typedef enum {
+ TSI108_IRQ_DIRECTED,
+ TSI108_IRQ_DISTRIBUTED,
+} TSI108_IRQ_MODE;
+#endif /* _ASM_POWERPC_TSI108_IRQ_H */
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h
new file mode 100644
index 000000000..fb6f62669
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_pci.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2007 IBM Corp
+ */
+
+#ifndef _ASM_POWERPC_TSI108_PCI_H
+#define _ASM_POWERPC_TSI108_PCI_H
+
+#include <asm/tsi108.h>
+
+/* Register definitions */
+#define TSI108_PCI_P2O_BAR0 (TSI108_PCI_OFFSET + 0x10)
+#define TSI108_PCI_P2O_BAR0_UPPER (TSI108_PCI_OFFSET + 0x14)
+#define TSI108_PCI_P2O_BAR2 (TSI108_PCI_OFFSET + 0x18)
+#define TSI108_PCI_P2O_BAR2_UPPER (TSI108_PCI_OFFSET + 0x1c)
+#define TSI108_PCI_P2O_PAGE_SIZES (TSI108_PCI_OFFSET + 0x4c)
+#define TSI108_PCI_PFAB_BAR0 (TSI108_PCI_OFFSET + 0x204)
+#define TSI108_PCI_PFAB_BAR0_UPPER (TSI108_PCI_OFFSET + 0x208)
+#define TSI108_PCI_PFAB_IO (TSI108_PCI_OFFSET + 0x20c)
+#define TSI108_PCI_PFAB_IO_UPPER (TSI108_PCI_OFFSET + 0x210)
+#define TSI108_PCI_PFAB_MEM32 (TSI108_PCI_OFFSET + 0x214)
+#define TSI108_PCI_PFAB_PFM3 (TSI108_PCI_OFFSET + 0x220)
+#define TSI108_PCI_PFAB_PFM4 (TSI108_PCI_OFFSET + 0x230)
+
+extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
+extern void tsi108_pci_int_init(struct device_node *node);
+extern void tsi108_irq_cascade(struct irq_desc *desc);
+extern void tsi108_clear_pci_cfg_error(void);
+
+#endif /* _ASM_POWERPC_TSI108_PCI_H */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
new file mode 100644
index 000000000..93157a661
--- /dev/null
+++ b/arch/powerpc/include/asm/types.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ */
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+
+typedef __vector128 vector128;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
new file mode 100644
index 000000000..3ddc65c63
--- /dev/null
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/extable.h>
+#include <asm/kup.h>
+
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define TASK_SIZE_MAX TASK_SIZE_USER64
+#endif
+
+#include <asm-generic/access_ok.h>
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ */
+#define __put_user(x, ptr) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
+ __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
+ \
+ might_fault(); \
+ do { \
+ __label__ __pu_failed; \
+ \
+ allow_write_to_user(__pu_addr, __pu_size); \
+ __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = 0; \
+ break; \
+ \
+__pu_failed: \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = -EFAULT; \
+ } while (0); \
+ \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *_pu_addr = (ptr); \
+ \
+ access_ok(_pu_addr, sizeof(*(ptr))) ? \
+ __put_user(x, _pu_addr) : -EFAULT; \
+})
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: " op "%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), "m<>" (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label) \
+ __put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: stw%X1 %0, %1\n" \
+ "2: stw%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : \
+ : "r" (x), "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label) \
+do { \
+ __typeof__(*(ptr)) __user *__pus_addr = (ptr); \
+ \
+ switch (size) { \
+ case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
+ case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
+ case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
+ case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
+ default: BUILD_BUG(); \
+ } \
+} while (0)
+
+/*
+ * This does an atomic 128-byte aligned load from userspace.
+ * It is up to the caller to do enable_kernel_vmx() before calling!
+ */
+#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
+ __asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine altivec\n" \
+ "1: lvx 0,0,%1 # get user\n" \
+ " stvx 0,0,%2 # put kernel\n" \
+ ".machine pop\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ EX_TABLE(1b, 3b) \
+ : "=r" (err) \
+ : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define __get_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: "op"%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : "m<>" (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __get_user_asm2_goto(x, addr, label) \
+ __get_user_asm_goto(x, addr, label, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: lwz%X1 %0, %1\n" \
+ "2: lwz%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : "=&r" (x) \
+ : "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __get_user_size_goto(x, ptr, size, label) \
+do { \
+ BUILD_BUG_ON(size > sizeof(x)); \
+ switch (size) { \
+ case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
+ case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
+ case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
+ case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
+ default: x = 0; BUILD_BUG(); \
+ } \
+} while (0)
+
+#define __get_user_size_allowed(x, ptr, size, retval) \
+do { \
+ __label__ __gus_failed; \
+ \
+ __get_user_size_goto(x, ptr, size, __gus_failed); \
+ retval = 0; \
+ break; \
+__gus_failed: \
+ x = 0; \
+ retval = -EFAULT; \
+} while (0)
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op"%U2%X2 %1, %2 # get_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " li %1,0\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ EX_TABLE(1b, 3b) \
+ : "=r" (err), "=r" (x) \
+ : "m<>" (*addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err) \
+ __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: lwz%X2 %1, %2\n" \
+ "2: lwz%X2 %L1, %L2\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " li %1,0\n" \
+ " li %1+1,0\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ EX_TABLE(1b, 4b) \
+ EX_TABLE(2b, 4b) \
+ : "=r" (err), "=&r" (x) \
+ : "m" (*addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size_allowed(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ BUILD_BUG_ON(size > sizeof(x)); \
+ switch (size) { \
+ case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
+ case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
+ case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
+ case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
+ default: x = 0; BUILD_BUG(); \
+ } \
+} while (0)
+
+#define __get_user_size_goto(x, ptr, size, label) \
+do { \
+ long __gus_retval; \
+ \
+ __get_user_size_allowed(x, ptr, size, __gus_retval); \
+ if (__gus_retval) \
+ goto label; \
+} while (0)
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
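+
+/*
+ * For example (illustrative): on 32-bit, __long_type(*(u64 __user *)p)
+ * is unsigned long long, while __long_type(*(u32 __user *)p) stays
+ * unsigned long.
+ */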
+
+#define __get_user(x, ptr) \
+({ \
+ long __gu_err; \
+ __long_type(*(ptr)) __gu_val; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
+ \
+ might_fault(); \
+ allow_read_from_user(__gu_addr, __gu_size); \
+ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ prevent_read_from_user(__gu_addr, __gu_size); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *_gu_addr = (ptr); \
+ \
+ access_ok(_gu_addr, sizeof(*(ptr))) ? \
+ __get_user(x, _gu_addr) : \
+ ((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
+})
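+
+/*
+ * Example (sketch only, not from this file): a typical checked
+ * read-modify-write of a user value with the accessors above:
+ *
+ *	int __user *uptr;	// hypothetical user pointer
+ *	int val;
+ *
+ *	if (get_user(val, uptr))
+ *		return -EFAULT;
+ *	if (put_user(val + 1, uptr))
+ *		return -EFAULT;
+ */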
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+#ifdef __powerpc64__
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ unsigned long ret;
+
+ allow_read_write_user(to, from, n);
+ ret = __copy_tofrom_user(to, from, n);
+ prevent_read_write_user(to, from, n);
+ return ret;
+}
+#endif /* __powerpc64__ */
+
+static inline unsigned long raw_copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ unsigned long ret;
+
+ allow_read_from_user(from, n);
+ ret = __copy_tofrom_user((__force void __user *)to, from, n);
+ prevent_read_from_user(from, n);
+ return ret;
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long ret;
+
+ allow_write_to_user(to, n);
+ ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+ prevent_write_to_user(to, n);
+ return ret;
+}
+
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ unsigned long ret;
+
+ might_fault();
+ allow_write_to_user(addr, size);
+ ret = __arch_clear_user(addr, size);
+ prevent_write_to_user(addr, size);
+ return ret;
+}
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+ return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
+}
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (check_copy_size(from, n, true)) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
+
+extern long __copy_from_user_flushcache(void *dst, const void __user *src,
+ unsigned size);
+extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+ size_t len);
+
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+
+ might_fault();
+
+ allow_read_write_user((void __user *)ptr, ptr, len);
+ return true;
+}
+#define user_access_begin user_access_begin
+#define user_access_end prevent_current_access_user
+#define user_access_save prevent_user_access_return
+#define user_access_restore restore_user_access
+
+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+
+ might_fault();
+
+ allow_read_from_user(ptr, len);
+ return true;
+}
+#define user_read_access_begin user_read_access_begin
+#define user_read_access_end prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+
+ might_fault();
+
+ allow_write_to_user((void __user *)ptr, len);
+ return true;
+}
+#define user_write_access_begin user_write_access_begin
+#define user_write_access_end prevent_current_write_to_user
+
+#define unsafe_get_user(x, p, e) do { \
+ __long_type(*(p)) __gu_val; \
+ __typeof__(*(p)) __user *__gu_addr = (p); \
+ \
+ __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
+ (x) = (__typeof__(*(p)))__gu_val; \
+} while (0)
+
+#define unsafe_put_user(x, p, e) \
+ __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
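+
+/*
+ * Sketch (illustrative only) of the expected pairing: user_access_begin()
+ * opens the user window once, the unsafe_*_user() accessors then skip the
+ * per-access KUAP open/close, and user_access_end() must run on all paths:
+ *
+ *	if (!user_access_begin(uptr, sizeof(*uptr)))
+ *		return -EFAULT;
+ *	unsafe_put_user(val, uptr, efault);
+ *	user_access_end();
+ *	return 0;
+ * efault:
+ *	user_access_end();
+ *	return -EFAULT;
+ */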
+
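+/*
+ * The bulk copies below go word-at-a-time: 8-byte accesses for the
+ * aligned body, then a 4/2/1-byte tail, branching to @e on any faulting
+ * access.
+ */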
+#define unsafe_copy_from_user(d, s, l, e) \
+do { \
+ u8 *_dst = (u8 *)(d); \
+ const u8 __user *_src = (const u8 __user *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
+ if (_len & 4) { \
+ unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
+} while (0)
+
+#define unsafe_copy_to_user(d, s, l, e) \
+do { \
+ u8 __user *_dst = (u8 __user *)(d); \
+ const u8 *_src = (const u8 *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
+ if (_len & 4) { \
+ unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
+} while (0)
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+ __get_user_size_goto(*((type *)(dst)), \
+ (__force type __user *)(src), sizeof(type), err_label)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+ __put_user_size_goto(*((type *)(src)), \
+ (__force type __user *)(dst), sizeof(type), err_label)
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
new file mode 100644
index 000000000..b1f094728
--- /dev/null
+++ b/arch/powerpc/include/asm/udbg.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) 2001, 2006 IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_UDBG_H
+#define _ASM_POWERPC_UDBG_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/init.h>
+
+extern void (*udbg_putc)(char c);
+extern void (*udbg_flush)(void);
+extern int (*udbg_getc)(void);
+extern int (*udbg_getc_poll)(void);
+
+void udbg_puts(const char *s);
+int udbg_write(const char *s, int n);
+
+void register_early_udbg_console(void);
+void udbg_printf(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+void udbg_progress(char *s, unsigned short hex);
+
+void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+void __init udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+void __init udbg_uart_setup(unsigned int speed, unsigned int clock);
+unsigned int __init udbg_probe_uart_speed(unsigned int clock);
+
+struct device_node;
+void __init udbg_scc_init(int force_scc);
+int udbg_adb_init(int force_btext);
+void udbg_adb_init_early(void);
+
+void __init udbg_early_init(void);
+void __init udbg_init_debug_lpar(void);
+void __init udbg_init_debug_lpar_hvsi(void);
+void __init udbg_init_pmac_realmode(void);
+void __init udbg_init_maple_realmode(void);
+void __init udbg_init_pas_realmode(void);
+void __init udbg_init_rtas_panel(void);
+void __init udbg_init_rtas_console(void);
+void __init udbg_init_btext(void);
+void __init udbg_init_44x_as1(void);
+void __init udbg_init_40x_realmode(void);
+void __init udbg_init_cpm(void);
+void __init udbg_init_usbgecko(void);
+void __init udbg_init_memcons(void);
+void __init udbg_init_ehv_bc(void);
+void __init udbg_init_ps3gelic(void);
+void __init udbg_init_debug_opal_raw(void);
+void __init udbg_init_debug_opal_hvsi(void);
+void __init udbg_init_debug_16550(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/uic.h b/arch/powerpc/include/asm/uic.h
new file mode 100644
index 000000000..7b7bd15b1
--- /dev/null
+++ b/arch/powerpc/include/asm/uic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * IBM PPC4xx UIC external definitions and structure.
+ *
+ * Maintainer: David Gibson <dwg@au1.ibm.com>
+ * Copyright 2007 IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_UIC_H
+#define _ASM_POWERPC_UIC_H
+
+#ifdef __KERNEL__
+
+extern void __init uic_init_tree(void);
+extern unsigned int uic_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UIC_H */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
new file mode 100644
index 000000000..b66f6db7b
--- /dev/null
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor API.
+ *
+ * Copyright 2019, IBM Corporation.
+ *
+ */
+#ifndef _ASM_POWERPC_ULTRAVISOR_API_H
+#define _ASM_POWERPC_ULTRAVISOR_API_H
+
+#include <asm/hvcall.h>
+
+/* Return codes */
+#define U_BUSY H_BUSY
+#define U_FUNCTION H_FUNCTION
+#define U_NOT_AVAILABLE H_NOT_AVAILABLE
+#define U_P2 H_P2
+#define U_P3 H_P3
+#define U_P4 H_P4
+#define U_P5 H_P5
+#define U_PARAMETER H_PARAMETER
+#define U_PERMISSION H_PERMISSION
+#define U_SUCCESS H_SUCCESS
+
+/* opcodes */
+#define UV_WRITE_PATE 0xF104
+#define UV_RETURN 0xF11C
+#define UV_ESM 0xF110
+#define UV_REGISTER_MEM_SLOT 0xF120
+#define UV_UNREGISTER_MEM_SLOT 0xF124
+#define UV_PAGE_IN 0xF128
+#define UV_PAGE_OUT 0xF12C
+#define UV_SHARE_PAGE 0xF130
+#define UV_UNSHARE_PAGE 0xF134
+#define UV_UNSHARE_ALL_PAGES 0xF140
+#define UV_PAGE_INVAL 0xF138
+#define UV_SVM_TERMINATE 0xF13C
+
+#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
new file mode 100644
index 000000000..790b0e636
--- /dev/null
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor definitions
+ *
+ * Copyright 2019, IBM Corporation.
+ *
+ */
+#ifndef _ASM_POWERPC_ULTRAVISOR_H
+#define _ASM_POWERPC_ULTRAVISOR_H
+
+#include <asm/asm-prototypes.h>
+#include <asm/ultravisor-api.h>
+#include <asm/firmware.h>
+
+int early_init_dt_scan_ultravisor(unsigned long node, const char *uname,
+ int depth, void *data);
+
+/*
+ * In ultravisor-enabled systems, PTCR becomes ultravisor-privileged only for
+ * writing, and an attempt to write to it will cause a Hypervisor Emulation
+ * Assistance interrupt.
+ */
+static inline void set_ptcr_when_no_uv(u64 val)
+{
+ if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
+ mtspr(SPRN_PTCR, val);
+}
+
+static inline int uv_register_pate(u64 lpid, u64 dw0, u64 dw1)
+{
+ return ucall_norets(UV_WRITE_PATE, lpid, dw0, dw1);
+}
+
+static inline int uv_share_page(u64 pfn, u64 npages)
+{
+ return ucall_norets(UV_SHARE_PAGE, pfn, npages);
+}
+
+static inline int uv_unshare_page(u64 pfn, u64 npages)
+{
+ return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
+}
+
+static inline int uv_unshare_all_pages(void)
+{
+ return ucall_norets(UV_UNSHARE_ALL_PAGES);
+}
+
+static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags,
+ u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags,
+ page_shift);
+}
+
+static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
+ u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags,
+ page_shift);
+}
+
+static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
+ u64 flags, u64 slotid)
+{
+ return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa,
+ size, flags, slotid);
+}
+
+static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
+{
+ return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
+}
+
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
+}
+
+static inline int uv_svm_terminate(u64 lpid)
+{
+ return ucall_norets(UV_SVM_TERMINATE, lpid);
+}
+
+#endif /* _ASM_POWERPC_ULTRAVISOR_H */
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
new file mode 100644
index 000000000..e278299b9
--- /dev/null
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * uninorth.h: definitions for using the "UniNorth" host bridge chip
+ *              from Apple. This chip is used on "Core99" machines.
+ *              It also includes the U2 used on more recent MacRISC2/3
+ *              machines and the U3 (G5).
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_UNINORTH_H__
+#define __ASM_UNINORTH_H__
+
+/*
+ * Uni-N and U3 config space reg. definitions
+ *
+ * (Little endian)
+ */
+
+/* Address ranges selection. This one should work with Bandit too */
+/* Not U3 */
+#define UNI_N_ADDR_SELECT 0x48
+#define UNI_N_ADDR_COARSE_MASK 0xffff0000 /* 256Mb regions at *0000000 */
+#define UNI_N_ADDR_FINE_MASK 0x0000ffff /* 16Mb regions at f*000000 */
+
+/* AGP registers */
+/* Not U3 */
+#define UNI_N_CFG_GART_BASE 0x8c
+#define UNI_N_CFG_AGP_BASE 0x90
+#define UNI_N_CFG_GART_CTRL 0x94
+#define UNI_N_CFG_INTERNAL_STATUS 0x98
+#define UNI_N_CFG_GART_DUMMY_PAGE 0xa4
+
+/* UNI_N_CFG_GART_CTRL bits definitions */
+#define UNI_N_CFG_GART_INVAL 0x00000001
+#define UNI_N_CFG_GART_ENABLE 0x00000100
+#define UNI_N_CFG_GART_2xRESET 0x00010000
+#define UNI_N_CFG_GART_DISSBADET 0x00020000
+/* The following seems to be used only on U3 <j.glisse@gmail.com> */
+#define U3_N_CFG_GART_SYNCMODE 0x00040000
+#define U3_N_CFG_GART_PERFRD 0x00080000
+#define U3_N_CFG_GART_B2BGNT 0x00200000
+#define U3_N_CFG_GART_FASTDDR 0x00400000
+
+/* My understanding of UniNorth AGP is as of UniNorth rev 1.0x;
+ * revision 1.5 (x4 AGP) may need further changes.
+ *
+ * AGP_BASE register contains the base address of the AGP aperture on
+ * the AGP bus. It doesn't seem to be visible to the CPU as of UniNorth 1.x,
+ * even if decoding of this address range is enabled in the address select
+ * register. Apparently, the only supported bases are 256Mb multiples
+ * (high 4 bits of that register).
+ *
+ * The GART_BASE register appears to contain the physical address of the
+ * GART in system memory in the high address bits (page aligned), and the
+ * GART size in the low-order bits (number of GART pages).
+ *
+ * The GART format itself is one 32-bit word per physical memory page.
+ * This word contains, in little-endian format (!!!), the physical address
+ * of the page in the high bits, and what appears to be an "enable" bit
+ * in the LSB (bit 0) that must be set to 1 when the entry is valid.
+ *
+ * Obviously, the GART is not cache coherent, so any change to it
+ * must be flushed to memory (or maybe the GART space can just be made
+ * non-cacheable). AGP memory itself doesn't seem to be cache coherent
+ * either.
+ *
+ * In order to invalidate the GART (which is probably necessary to
+ * invalidate the bridge's internal TLBs), the following sequence has to
+ * be written,
+ * in order, to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * UNI_N_CFG_GART_ENABLE
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET
+ * UNI_N_CFG_GART_ENABLE
+ *
+ * As far as AGP "features" are concerned, it looks like fast write may
+ * not be supported, but this has to be confirmed.
+ *
+ * Turning on AGP seems to require a double invalidate operation, one
+ * before setting the AGP command register, one after.
+ *
+ * Turning off AGP seems to require the following sequence: first wait
+ * for the AGP to be idle by reading the internal status register, then
+ * write the following, in order, to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * 0
+ * UNI_N_CFG_GART_2xRESET
+ * 0
+ */
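+
+/*
+ * Illustrative sketch of the GART invalidation sequence above, assuming
+ * a hypothetical helper uninorth_cfg_write() for bridge config space
+ * writes (not provided by this header):
+ *
+ *	uninorth_cfg_write(UNI_N_CFG_GART_CTRL,
+ *			   UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL);
+ *	uninorth_cfg_write(UNI_N_CFG_GART_CTRL, UNI_N_CFG_GART_ENABLE);
+ *	uninorth_cfg_write(UNI_N_CFG_GART_CTRL,
+ *			   UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET);
+ *	uninorth_cfg_write(UNI_N_CFG_GART_CTRL, UNI_N_CFG_GART_ENABLE);
+ */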
+
+/*
+ * Uni-N memory mapped reg. definitions
+ *
+ * These registers are big-endian!
+ *
+ * Their meanings come from Darwin and/or from experiments made with
+ * the boot ROM; their exact meanings are not yet certain.
+ *
+ */
+
+/* Version of the UniNorth chip */
+#define UNI_N_VERSION			0x0000		/* Known versions: 3, 7 and 8 */
+
+#define UNI_N_VERSION_107 0x0003 /* 1.0.7 */
+#define UNI_N_VERSION_10A 0x0007 /* 1.0.10 */
+#define UNI_N_VERSION_150 0x0011 /* 1.5 */
+#define UNI_N_VERSION_200 0x0024 /* 2.0 */
+#define UNI_N_VERSION_PANGEA 0x00C0 /* Integrated U1 + K */
+#define UNI_N_VERSION_INTREPID 0x00D2 /* Integrated U2 + K */
+#define UNI_N_VERSION_300 0x0030 /* 3.0 (U3 on G5) */
+
+/* This register is used to enable/disable various clocks */
+#define UNI_N_CLOCK_CNTL 0x0020
+#define UNI_N_CLOCK_CNTL_PCI 0x00000001 /* PCI2 clock control */
+#define UNI_N_CLOCK_CNTL_GMAC 0x00000002 /* GMAC clock control */
+#define UNI_N_CLOCK_CNTL_FW 0x00000004 /* FireWire clock control */
+#define UNI_N_CLOCK_CNTL_ATA100 0x00000010 /* ATA-100 clock control (U2) */
+
+/* Power Management control */
+#define UNI_N_POWER_MGT 0x0030
+#define UNI_N_POWER_MGT_NORMAL 0x00
+#define UNI_N_POWER_MGT_IDLE2 0x01
+#define UNI_N_POWER_MGT_SLEEP 0x02
+
+/* This register is configured by Darwin depending on the UniN
+ * revision
+ */
+#define UNI_N_ARB_CTRL 0x0040
+#define UNI_N_ARB_CTRL_QACK_DELAY_SHIFT 15
+#define UNI_N_ARB_CTRL_QACK_DELAY_MASK 0x0e1f8000
+#define UNI_N_ARB_CTRL_QACK_DELAY 0x30
+#define UNI_N_ARB_CTRL_QACK_DELAY105 0x00
+
+/* This one _might_ return the CPU number of the CPU reading it;
+ * the bootROM decides whether to boot or to sleep/spinloop depending
+ * on this register being 0 or not
+ */
+#define UNI_N_CPU_NUMBER 0x0050
+
+/* This register appears to be read by the bootROM to decide what
+ * to do on a non-recoverable reset (powerup or wakeup)
+ */
+#define UNI_N_HWINIT_STATE 0x0070
+#define UNI_N_HWINIT_STATE_SLEEPING 0x01
+#define UNI_N_HWINIT_STATE_RUNNING 0x02
+/* This last bit appears to be used by the bootROM to know that the second
+ * CPU has started and will enter its sleep loop with IP=0.
+ */
+#define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000
+
+/* This register controls the AACK delay, which is set when the 2004
+ * iBook/PowerBook is in low-speed mode.
+ */
+#define UNI_N_AACK_DELAY 0x0100
+#define UNI_N_AACK_DELAY_ENABLE 0x00000001
+
+/* Clock status for Intrepid */
+#define UNI_N_CLOCK_STOP_STATUS0 0x0150
+#define UNI_N_CLOCK_STOPPED_EXTAGP 0x00200000
+#define UNI_N_CLOCK_STOPPED_AGPDEL 0x00100000
+#define UNI_N_CLOCK_STOPPED_I2S0_45_49 0x00080000
+#define UNI_N_CLOCK_STOPPED_I2S0_18 0x00040000
+#define UNI_N_CLOCK_STOPPED_I2S1_45_49 0x00020000
+#define UNI_N_CLOCK_STOPPED_I2S1_18 0x00010000
+#define UNI_N_CLOCK_STOPPED_TIMER 0x00008000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK18 0x00004000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK32 0x00002000
+#define UNI_N_CLOCK_STOPPED_SCC_VIA32 0x00001000
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT0 0x00000800
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT1 0x00000400
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT2 0x00000200
+#define UNI_N_CLOCK_STOPPED_PCI_FBCLKO 0x00000100
+#define UNI_N_CLOCK_STOPPED_VEO0 0x00000080
+#define UNI_N_CLOCK_STOPPED_VEO1 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB0 0x00000020
+#define UNI_N_CLOCK_STOPPED_USB1 0x00000010
+#define UNI_N_CLOCK_STOPPED_USB2 0x00000008
+#define UNI_N_CLOCK_STOPPED_32 0x00000004
+#define UNI_N_CLOCK_STOPPED_45 0x00000002
+#define UNI_N_CLOCK_STOPPED_49 0x00000001
+
+#define UNI_N_CLOCK_STOP_STATUS1 0x0160
+#define UNI_N_CLOCK_STOPPED_PLL4REF 0x00080000
+#define UNI_N_CLOCK_STOPPED_CPUDEL 0x00040000
+#define UNI_N_CLOCK_STOPPED_CPU 0x00020000
+#define UNI_N_CLOCK_STOPPED_BUF_REFCKO 0x00010000
+#define UNI_N_CLOCK_STOPPED_PCI2 0x00008000
+#define UNI_N_CLOCK_STOPPED_FW 0x00004000
+#define UNI_N_CLOCK_STOPPED_GB 0x00002000
+#define UNI_N_CLOCK_STOPPED_ATA66 0x00001000
+#define UNI_N_CLOCK_STOPPED_ATA100 0x00000800
+#define UNI_N_CLOCK_STOPPED_MAX 0x00000400
+#define UNI_N_CLOCK_STOPPED_PCI1 0x00000200
+#define UNI_N_CLOCK_STOPPED_KLPCI 0x00000100
+#define UNI_N_CLOCK_STOPPED_USB0PCI 0x00000080
+#define UNI_N_CLOCK_STOPPED_USB1PCI 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB2PCI 0x00000020
+#define UNI_N_CLOCK_STOPPED_7PCI1 0x00000008
+#define UNI_N_CLOCK_STOPPED_AGP 0x00000004
+#define UNI_N_CLOCK_STOPPED_PCI0 0x00000002
+#define UNI_N_CLOCK_STOPPED_18 0x00000001
+
+/* Intrepid register used by the OF do-platform-clockspreading call */
+#define UNI_N_CLOCK_SPREADING 0x190
+
+/* UniNorth rev. 1.5 has additional performance monitor registers at 0xf00-0xf50 */
+
+
+/*
+ * U3 specific registers
+ */
+
+
+/* U3 Toggle */
+#define U3_TOGGLE_REG 0x00e0
+#define U3_PMC_START_STOP 0x0001
+#define U3_MPIC_RESET 0x0002
+#define U3_MPIC_OUTPUT_ENABLE 0x0004
+
+/* U3 API PHY Config 1 */
+#define U3_API_PHY_CONFIG_1 0x23030
+
+/* U3 HyperTransport registers */
+#define U3_HT_CONFIG_BASE 0x70000
+#define U3_HT_LINK_COMMAND 0x100
+#define U3_HT_LINK_CONFIG 0x110
+#define U3_HT_LINK_FREQ 0x120
+
+#endif /* __ASM_UNINORTH_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
new file mode 100644
index 000000000..659a996c7
--- /dev/null
+++ b/arch/powerpc/include/asm/unistd.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file contains the system call numbers.
+ */
+#ifndef _ASM_POWERPC_UNISTD_H_
+#define _ASM_POWERPC_UNISTD_H_
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLD_UNAME
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#ifdef CONFIG_PPC32
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_SYS_OLD_SELECT
+#endif
+#ifdef CONFIG_PPC64
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_STAT
+#define __ARCH_WANT_COMPAT_FALLOCATE
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#endif
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
new file mode 100644
index 000000000..4fea116d3
--- /dev/null
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include <asm/probes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 8
+#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
+
+/* The following alias is needed for reference from arch-agnostic code */
+#define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION
+#define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */
+
+struct arch_uprobe {
+ union {
+ u32 insn[2];
+ u32 ixol[2];
+ };
+};
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_nr;
+};
+
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
new file mode 100644
index 000000000..7fae7e597
--- /dev/null
+++ b/arch/powerpc/include/asm/user.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_USER_H
+#define _ASM_POWERPC_USER_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Adapted from <asm-alpha/user.h>
+ *
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
+ * are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or totally unused; we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ */
+struct user {
+ struct user_pt_regs regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#endif /* _ASM_POWERPC_USER_H */
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
new file mode 100644
index 000000000..c36f71e01
--- /dev/null
+++ b/arch/powerpc/include/asm/vas.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-17 IBM Corp.
+ */
+
+#ifndef _ASM_POWERPC_VAS_H
+#define _ASM_POWERPC_VAS_H
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
+
+/*
+ * Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
+ * (Local FIFO Size Register) of the VAS workbook.
+ */
+#define VAS_RX_FIFO_SIZE_MIN (1 << 10) /* 1KB */
+#define VAS_RX_FIFO_SIZE_MAX (8 << 20) /* 8MB */
+
+/*
+ * Threshold Control Mode: have the paste operation fail if the number of
+ * requests in the receive FIFO exceeds a threshold.
+ *
+ * NOTE: No special error code yet if paste is rejected because of these
+ * limits. So users can't distinguish between this and other errors.
+ */
+#define VAS_THRESH_DISABLED 0
+#define VAS_THRESH_FIFO_GT_HALF_FULL 1
+#define VAS_THRESH_FIFO_GT_QTR_FULL 2
+#define VAS_THRESH_FIFO_GT_EIGHTH_FULL 3
+
+/*
+ * VAS window Linux status bits
+ */
+#define VAS_WIN_ACTIVE		0x0	/* Used in platform-independent VAS mmap() */
+/* Window is closed in the hypervisor due to lost credit */
+#define VAS_WIN_NO_CRED_CLOSE 0x00000001
+/* Window is closed due to migration */
+#define VAS_WIN_MIGRATE_CLOSE 0x00000002
+
+/*
+ * Get/Set bit fields
+ */
+#define GET_FIELD(m, v) (((v) & (m)) >> MASK_LSH(m))
+#define MASK_LSH(m) (__builtin_ffsl(m) - 1)
+#define SET_FIELD(m, v, val) \
+ (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m)))
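+
+/*
+ * Worked example (illustrative): with m = 0x0000ff00, MASK_LSH(m) is 8,
+ * so GET_FIELD(0x0000ff00, 0x1234) yields 0x12 and
+ * SET_FIELD(0x0000ff00, 0, 0x56) yields 0x5600.
+ */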
+
+/*
+ * Co-processor Engine type.
+ */
+enum vas_cop_type {
+ VAS_COP_TYPE_FAULT,
+ VAS_COP_TYPE_842,
+ VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_GZIP,
+ VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_FTW,
+ VAS_COP_TYPE_MAX,
+};
+
+/*
+ * User space VAS windows are opened by tasks and take references
+ * to pid and mm until windows are closed.
+ * Stores pid, mm, and tgid for each window.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+ struct mutex mmap_mutex; /* protects paste address mmap() */
+ /* with DLPAR close/open windows */
+ struct vm_area_struct *vma; /* Save VMA and used in DLPAR ops */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ u32 status; /* Window status used in OS */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used for powernv and powerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
+ * Receive window attributes specified by the (in-kernel) owner of window.
+ */
+struct vas_rx_win_attr {
+ u64 rx_fifo;
+ int rx_fifo_size;
+ int wcreds_max;
+
+ bool pin_win;
+ bool rej_no_credit;
+ bool tx_wcred_mode;
+ bool rx_wcred_mode;
+ bool tx_win_ord_mode;
+ bool rx_win_ord_mode;
+ bool data_stamp;
+ bool nx_win;
+ bool fault_win;
+ bool user_win;
+ bool notify_disable;
+ bool intr_disable;
+ bool notify_early;
+
+ int lnotify_lpid;
+ int lnotify_pid;
+ int lnotify_tid;
+ u32 pswid;
+
+ int tc_mode;
+};
+
+/*
+ * Window attributes specified by the in-kernel owner of a send window.
+ */
+struct vas_tx_win_attr {
+ enum vas_cop_type cop;
+ int wcreds_max;
+ int lpid;
+ int pidr; /* hardware PID (from SPRN_PID) */
+ int pswid;
+ int rsvd_txbuf_count;
+ int tc_mode;
+
+ bool user_win;
+ bool pin_win;
+ bool rej_no_credit;
+ bool rsvd_txbuf_enable;
+ bool tx_wcred_mode;
+ bool rx_wcred_mode;
+ bool tx_win_ord_mode;
+ bool rx_win_ord_mode;
+};
+
+#ifdef CONFIG_PPC_POWERNV
+/*
+ * Helper to map a chip ID to a VAS ID.
+ * For POWER9, this is a 1:1 mapping. In the future this may be a 1:N
+ * mapping, in which case we will need to update this helper.
+ *
+ * Return the VAS id or -1 if no matching vasid is found.
+ */
+int chip_to_vas_id(int chipid);
+
+/*
+ * Helper to initialize receive window attributes to defaults for an
+ * NX window.
+ */
+void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop);
+
+/*
+ * Open a VAS receive window for the instance of VAS identified by @vasid
+ * Use @attr to initialize the attributes of the window.
+ *
+ * Return a handle to the window or ERR_PTR() on error.
+ */
+struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_rx_win_attr *attr);
+
+/*
+ * Helper to initialize send window attributes to defaults for an NX window.
+ */
+extern void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr,
+ enum vas_cop_type cop);
+
+/*
+ * Open a VAS send window for the instance of VAS identified by @vasid
+ * and the co-processor type @cop. Use @attr to initialize attributes
+ * of the window.
+ *
+ * Note: The instance of VAS must already have an open receive window for
+ * the coprocessor type @cop.
+ *
+ * Return a handle to the send window or ERR_PTR() on error.
+ */
+struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_tx_win_attr *attr);
+
+/*
+ * Close the send or receive window identified by @win. For receive windows
+ * return -EAGAIN if there are active send windows attached to this receive
+ * window.
+ */
+int vas_win_close(struct vas_window *win);
+
+/*
+ * Copy the co-processor request block (CRB) @crb into the local L2 cache.
+ */
+int vas_copy_crb(void *crb, int offset);
+
+/*
+ * Paste a previously copied CRB (see vas_copy_crb()) from the L2 cache to
+ * the hardware address associated with the window @win. @re is expected/
+ * assumed to be true for NX windows.
+ */
+int vas_paste_crb(struct vas_window *win, int offset, bool re);
+
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
+/*
+ * Register / unregister a coprocessor type with the VAS API that is
+ * exported to user space. Applications can use this API to open / close
+ * windows, which can be used to send / receive requests directly to the
+ * coprocessor.
+ *
+ * Only the NX GZIP coprocessor type is supported now, but this API can
+ * be used for others in the future.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops);
+void vas_unregister_coproc_api(void);
+
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
+#endif /* _ASM_POWERPC_VAS_H */
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
new file mode 100644
index 000000000..7650b6ce1
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_H
+#define _ASM_POWERPC_VDSO_H
+
+#define VDSO_VERSION_STRING LINUX_2.6.15
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <generated/vdso64-offsets.h>
+#endif
+
+#ifdef CONFIG_VDSO32
+#include <generated/vdso32-offsets.h>
+#endif
+
+#define VDSO64_SYMBOL(base, name) ((unsigned long)(base) + (vdso64_offset_##name))
+
+#define VDSO32_SYMBOL(base, name) ((unsigned long)(base) + (vdso32_offset_##name))
+
+int vdso_getcpu_init(void);
+
+#else /* __ASSEMBLY__ */
+
+#ifdef __VDSO64__
+#define V_FUNCTION_BEGIN(name) \
+ .globl name; \
+ name: \
+
+#define V_FUNCTION_END(name) \
+ .size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+#endif /* __VDSO64__ */
+
+#ifdef __VDSO32__
+
+#define V_FUNCTION_BEGIN(name) \
+ .globl name; \
+ .type name,@function; \
+ name: \
+
+#define V_FUNCTION_END(name) \
+ .size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+
+#endif /* __VDSO32__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_H */
diff --git a/arch/powerpc/include/asm/vdso/clocksource.h b/arch/powerpc/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000..c1ba56b82
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_CLOCKSOURCE_H
+#define _ASM_POWERPC_VDSO_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000..f0a4cf01e
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <asm/vdso/timebase.h>
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+#define VDSO_HAS_TIME 1
+
+static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
+ const unsigned long _r4)
+{
+ register long r0 asm("r0") = _r0;
+ register unsigned long r3 asm("r3") = _r3;
+ register unsigned long r4 asm("r4") = _r4;
+ register int ret asm ("r3");
+
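+	/*
+	 * The kernel signals a failed syscall by setting CR0.SO and
+	 * returning the positive error code in r3; "bns+ 1f" skips the
+	 * negation when summary overflow is clear (success).
+	 */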
+ asm volatile(
+ " sc\n"
+ " bns+ 1f\n"
+ " neg %0, %0\n"
+ "1:\n"
+ : "=r" (ret), "+r" (r4), "+r" (r0)
+ : "r" (r3)
+ : "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");
+
+ return ret;
+}
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
+{
+ return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
+}
+
+#ifdef __powerpc64__
+
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
+}
+
+#else
+
+#define BUILD_VDSO32 1
+
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
+}
+#endif
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ return get_tb();
+}
+
+const struct vdso_data *__arch_get_vdso_data(void);
+
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif
+
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return true;
+}
+#define vdso_clocksource_ok vdso_clocksource_ok
+
+/*
+ * powerpc specific delta calculation.
+ *
+ * This variant removes the masking of the subtraction because the
+ * clocksource mask of all VDSO-capable clocksources on powerpc is U64_MAX,
+ * which would make the masking a pointless operation. The compiler cannot
+ * optimize it away as the mask comes from the vdso data and is not compile
+ * time constant.
+ */
+static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+{
+ return (cycles - last) * mult;
+}
+#define vdso_calc_delta vdso_calc_delta
+
+#ifndef __powerpc64__
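+/*
+ * 32-bit right shift of a 64-bit value done with 32-bit operations
+ * (avoiding a libgcc 64-bit shift helper): shift the halves separately
+ * and fold the bits spilling out of the high word into the low word.
+ */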
+static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
+{
+ u32 hi = ns >> 32;
+ u32 lo = ns;
+
+ lo >>= shift;
+ lo |= hi << (32 - shift);
+ hi >>= shift;
+
+ if (likely(hi == 0))
+ return lo;
+
+ return ((u64)hi << 32) | lo;
+}
+#define vdso_shift_ns vdso_shift_ns
+#endif
+
+#ifdef __powerpc64__
+int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
+ const struct vdso_data *vd);
+#else
+int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
+ const struct vdso_data *vd);
+#endif
+int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
+ const struct vdso_data *vd);
+__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
+ const struct vdso_data *vd);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h
new file mode 100644
index 000000000..80d13207c
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/processor.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_VDSO_PROCESSOR_H
+#define _ASM_POWERPC_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#ifdef CONFIG_PPC64
+#define HMT_very_low() asm volatile("or 31, 31, 31 # very low priority")
+#define HMT_low() asm volatile("or 1, 1, 1 # low priority")
+#define HMT_medium_low() asm volatile("or 6, 6, 6 # medium low priority")
+#define HMT_medium() asm volatile("or 2, 2, 2 # medium priority")
+#define HMT_medium_high() asm volatile("or 5, 5, 5 # medium high priority")
+#define HMT_high() asm volatile("or 3, 3, 3 # high priority")
+#else
+#define HMT_very_low()
+#define HMT_low()
+#define HMT_medium_low()
+#define HMT_medium()
+#define HMT_medium_high()
+#define HMT_high()
+#endif
+
+#ifdef CONFIG_PPC64
+#define cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ /* Pre-POWER10 uses low ; medium priority nops */ \
+ "or 1,1,1 ; or 2,2,2", \
+ /* POWER10 onward uses pause_short (wait 2,0) */ \
+ PPC_WAIT(2, 0), \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h
new file mode 100644
index 000000000..e9245f86a
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Common timebase prototypes and such for all ppc machines.
+ */
+
+#ifndef _ASM_POWERPC_VDSO_TIMEBASE_H
+#define _ASM_POWERPC_VDSO_TIMEBASE_H
+
+#include <asm/reg.h>
+
+/*
+ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
+ * version below in the else case of the ifdef.
+ */
+#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500))
+#define mftb() ({unsigned long rval; \
+ asm volatile( \
+ "90: mfspr %0, %2;\n" \
+ ASM_FTR_IFSET( \
+ "97: cmpwi %0,0;\n" \
+ " beq- 90b;\n", "", %1) \
+ : "=r" (rval) \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
+ rval;})
+#elif defined(CONFIG_PPC_8xx)
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#else
+#define mftb() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : \
+ "=r" (rval) : "i" (SPRN_TBRL)); rval;})
+#endif /* mftb() variants */
+
+#if defined(CONFIG_PPC_8xx)
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRU)); rval;})
+#endif
+
+#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
+
+static __always_inline u64 get_tb(void)
+{
+ unsigned int tbhi, tblo, tbhi2;
+
+ /*
+ * We use __powerpc64__ here not CONFIG_PPC64 because we want the compat
+ * VDSO to use the 32-bit compatible version in the while loop below.
+ */
+ if (__is_defined(__powerpc64__))
+ return mftb();
+
+ do {
+ tbhi = mftbu();
+ tblo = mftb();
+ tbhi2 = mftbu();
+ } while (tbhi != tbhi2);
+
+ return ((u64)tbhi << 32) | tblo;
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mtspr(SPRN_TBWL, 0);
+ mtspr(SPRN_TBWU, upper);
+ mtspr(SPRN_TBWL, lower);
+}
+
+#endif /* _ASM_POWERPC_VDSO_TIMEBASE_H */
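get_tb() above uses the classic hi/lo/hi technique for reading a 64-bit counter on a 32-bit CPU: if the upper half changed between the two reads, the lower half may have wrapped in between, so the read is retried. A self-contained userspace sketch of the same algorithm, with hypothetical read_hi()/read_lo() accessors standing in for mftbu()/mftb():

    #include <stdint.h>

    /* Hypothetical 32-bit accessors for the two counter halves. */
    extern uint32_t read_hi(void);
    extern uint32_t read_lo(void);

    static uint64_t read_counter64(void)
    {
            uint32_t hi, lo, hi2;

            do {
                    hi  = read_hi();
                    lo  = read_lo();
                    hi2 = read_hi();
            } while (hi != hi2);    /* retry if the high half rolled over */

            return ((uint64_t)hi << 32) | lo;
    }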
diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000..48cf23f1e
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/vsyscall.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_VSYSCALL_H
+#define _ASM_POWERPC_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <asm/vdso_datapage.h>
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__arch_get_k_vdso_data(void)
+{
+ return vdso_data->data;
+}
+#define __arch_get_k_vdso_data __arch_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_VSYSCALL_H */
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
new file mode 100644
index 000000000..a585c8e53
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _VDSO_DATAPAGE_H
+#define _VDSO_DATAPAGE_H
+#ifdef __KERNEL__
+
+/*
+ * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
+ * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * IBM Corp.
+ */
+
+
+/*
+ * Note about this structure:
+ *
+ * This structure was historically called systemcfg and exposed to
+ * userland via /proc/ppc64/systemcfg. Unfortunately, this became an
+ * ABI issue as some proprietary software started relying on being able
+ * to mmap() it, thus we have to keep the base layout at least for a
+ * few kernel versions.
+ *
+ * However, since ppc32 doesn't carry this backward-compatibility burden,
+ * a simpler version of the data structure is used there with only the
+ * fields actually used by the vDSO.
+ *
+ */
+
+/*
+ * If the major version changes we are incompatible.
+ * Minor version changes are a hint.
+ */
+#define SYSTEMCFG_MAJOR 1
+#define SYSTEMCFG_MINOR 1
+
+#ifndef __ASSEMBLY__
+
+#include <linux/unistd.h>
+#include <linux/time.h>
+#include <vdso/datapage.h>
+
+#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32)
+
+/*
+ * So here is the ppc64 backward compatible version
+ */
+
+#ifdef CONFIG_PPC64
+
+struct vdso_arch_data {
+ __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */
+ struct { /* Systemcfg version numbers */
+ __u32 major; /* Major number 0x10 */
+ __u32 minor; /* Minor number 0x14 */
+ } version;
+
+ /* Note about the platform flags: it now only contains the lpar
+ * bit. The actual platform number is dead and buried
+ */
+ __u32 platform; /* Platform flags 0x18 */
+ __u32 processor; /* Processor type 0x1C */
+ __u64 processorCount; /* # of physical processors 0x20 */
+ __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */
+ __u64 tb_orig_stamp; /* (NU) Timebase at boot 0x30 */
+	__u64 tb_ticks_per_sec;		/* Timebase ticks / sec		0x38 */
+ __u64 tb_to_xs; /* (NU) Inverse of TB to 2^20 0x40 */
+ __u64 stamp_xsec; /* (NU) 0x48 */
+ __u64 tb_update_count; /* (NU) Timebase atomicity ctr 0x50 */
+ __u32 tz_minuteswest; /* (NU) Min. west of Greenwich 0x58 */
+ __u32 tz_dsttime; /* (NU) Type of dst correction 0x5C */
+ __u32 dcache_size; /* L1 d-cache size 0x60 */
+ __u32 dcache_line_size; /* L1 d-cache line size 0x64 */
+ __u32 icache_size; /* L1 i-cache size 0x68 */
+ __u32 icache_line_size; /* L1 i-cache line size 0x6C */
+
+ /* those additional ones don't have to be located anywhere
+ * special as they were not part of the original systemcfg
+ */
+ __u32 dcache_block_size; /* L1 d-cache block size */
+ __u32 icache_block_size; /* L1 i-cache block size */
+ __u32 dcache_log_block_size; /* L1 d-cache log block size */
+ __u32 icache_log_block_size; /* L1 i-cache log block size */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
+
+ struct vdso_data data[CS_BASES];
+};
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * And here is the simpler 32 bits version
+ */
+struct vdso_arch_data {
+	__u64 tb_ticks_per_sec;		/* Timebase ticks / sec		0x38 */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
+ struct vdso_data data[CS_BASES];
+};
+
+#endif /* CONFIG_PPC64 */
+
+extern struct vdso_arch_data *vdso_data;
+
+#else /* __ASSEMBLY__ */
+
+.macro get_datapage ptr
+ bcl 20, 31, .+4
+999:
+ mflr \ptr
+ addis \ptr, \ptr, (_vdso_datapage - 999b)@ha
+ addi \ptr, \ptr, (_vdso_datapage - 999b)@l
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _VDSO_DATAPAGE_H */
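syscall_map above packs one bit per syscall, 32 bits per __u32 word, most-significant bit first (the kernel's vDSO setup code historically sets 0x80000000 >> (nr & 0x1f)). A sketch of how a consumer could test it; the helper name is hypothetical:

    /* Test whether syscall "nr" is marked present in the map. */
    static int syscall_is_mapped(const __u32 *map, unsigned int nr)
    {
            return (map[nr / 32] >> (31 - (nr % 32))) & 1;
    }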
diff --git a/arch/powerpc/include/asm/vermagic.h b/arch/powerpc/include/asm/vermagic.h
new file mode 100644
index 000000000..b054a8576
--- /dev/null
+++ b/arch/powerpc/include/asm/vermagic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#ifdef CONFIG_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
+#else
+#define MODULE_ARCH_VERMAGIC_FTRACE ""
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "
+#else
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
new file mode 100644
index 000000000..fcf721682
--- /dev/null
+++ b/arch/powerpc/include/asm/vga.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+
+#include <asm/io.h>
+
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+
+#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use
+ * little-endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+ *addr = cpu_to_le16(val);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+ return le16_to_cpu(*addr);
+}
+
+#define VT_BUF_HAVE_MEMSETW
+static inline void scr_memsetw(u16 *s, u16 v, unsigned int n)
+{
+ memset16(s, cpu_to_le16(v), n / 2);
+}
+
+#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
+#define scr_memcpyw memcpy
+#define scr_memmovew memmove
+
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
+
+#ifdef __powerpc64__
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s))
+#else
+#define VGA_MAP_MEM(x,s) (x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VGA_H_ */
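scr_writew()/scr_readw() above keep the VGA text buffer little-endian regardless of CPU endianness. A sketch of a typical caller, clearing one 80-column row of the text buffer; the helper is illustrative and the vram mapping would come from VGA_MAP_MEM() in real code:

    static void clear_row(volatile u16 *vram, int row)
    {
            int col;

            /* 0x07 = white-on-black attribute in each cell's high byte */
            for (col = 0; col < 80; col++)
                    scr_writew((0x07 << 8) | ' ', vram + row * 80 + col);
    }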
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
new file mode 100644
index 000000000..e7479a4ab
--- /dev/null
+++ b/arch/powerpc/include/asm/vio.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * IBM PowerPC Virtual I/O Infrastructure Support.
+ *
+ * Copyright (c) 2003 IBM Corp.
+ * Dave Engebretsen engebret@us.ibm.com
+ * Santiago Leon santil@us.ibm.com
+ */
+
+#ifndef _ASM_POWERPC_VIO_H
+#define _ASM_POWERPC_VIO_H
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/scatterlist.h>
+
+#include <asm/hvcall.h>
+
+/*
+ * Architecture-specific constants for drivers to
+ * extract attributes of the device using vio_get_attribute()
+ */
+#define VETH_MAC_ADDR "local-mac-address"
+#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters"
+
+/* End architecture-specific constants */
+
+#define h_vio_signal(ua, mode) \
+ plpar_hcall_norets(H_VIO_SIGNAL, ua, mode)
+
+#define VIO_IRQ_DISABLE 0UL
+#define VIO_IRQ_ENABLE 1UL
+
+/*
+ * VIO CMO minimum entitlement for all devices and spare entitlement
+ */
+#define VIO_CMO_MIN_ENT 1562624
+
+extern struct bus_type vio_bus_type;
+
+struct iommu_table;
+
+/*
+ * Platform Facilities Option (PFO)-specific data
+ */
+
+/* Starting unit address for PFO devices on the VIO BUS */
+#define VIO_BASE_PFO_UA 0x50000000
+
+/**
+ * vio_pfo_op - PFO operation parameters
+ *
+ * @flags: h_call subfunctions and modifiers
+ * @in: Input data block logical real address
+ * @inlen: If non-negative, the length of the input data block. If negative,
+ * the length of the input data descriptor list in bytes.
+ * @out: Output data block logical real address
+ * @outlen: If non-negative, the length of the output data block. If negative,
+ *	the length of the output data descriptor list in bytes.
+ * @csbcpb: Logical real address of the 4k naturally-aligned storage block
+ * containing the CSB & optional FC field specific CPB
+ * @timeout: # of milliseconds to retry h_call, 0 for no timeout.
+ * @hcall_err: pointer to return the h_call return value, else NULL
+ */
+struct vio_pfo_op {
+ u64 flags;
+ s64 in;
+ s64 inlen;
+ s64 out;
+ s64 outlen;
+ u64 csbcpb;
+ void *done;
+ unsigned long handle;
+ unsigned int timeout;
+ long hcall_err;
+};
+
+/* End PFO specific data */
+
+enum vio_dev_family {
+ VDEVICE, /* The OF node is a child of /vdevice */
+ PFO, /* The OF node is a child of /ibm,platform-facilities */
+};
+
+/**
+ * vio_dev - This structure is used to describe virtual I/O devices.
+ *
+ * @desired: set from return of driver's get_desired_dma() function
+ * @entitled: bytes of IO data that has been reserved for this device.
+ * @allocated: bytes of IO data currently in use by the device.
+ * @allocs_failed: number of DMA failures due to insufficient entitlement.
+ */
+struct vio_dev {
+ const char *name;
+ const char *type;
+ uint32_t unit_address;
+ uint32_t resource_id;
+ unsigned int irq;
+ struct {
+ size_t desired;
+ size_t entitled;
+ size_t allocated;
+ atomic_t allocs_failed;
+ } cmo;
+ enum vio_dev_family family;
+ struct device dev;
+};
+
+struct vio_driver {
+ const char *name;
+ const struct vio_device_id *id_table;
+ int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
+ void (*remove)(struct vio_dev *dev);
+ void (*shutdown)(struct vio_dev *dev);
+ /* A driver must have a get_desired_dma() function to
+ * be loaded in a CMO environment if it uses DMA.
+ */
+ unsigned long (*get_desired_dma)(struct vio_dev *dev);
+ const struct dev_pm_ops *pm;
+ struct device_driver driver;
+};
+
+extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+ const char *mod_name);
+/*
+ * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define vio_register_driver(driver) \
+ __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+extern void vio_unregister_driver(struct vio_driver *drv);
+
+extern int vio_cmo_entitlement_update(size_t);
+extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
+
+extern void vio_unregister_device(struct vio_dev *dev);
+
+extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op);
+
+struct device_node;
+
+extern struct vio_dev *vio_register_device_node(
+ struct device_node *node_vdev);
+extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
+ int *length);
+#ifdef CONFIG_PPC_PSERIES
+extern struct vio_dev *vio_find_node(struct device_node *vnode);
+extern int vio_enable_interrupts(struct vio_dev *dev);
+extern int vio_disable_interrupts(struct vio_dev *dev);
+#else
+static inline int vio_enable_interrupts(struct vio_dev *dev)
+{
+ return 0;
+}
+#endif
+
+static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct vio_driver, driver);
+}
+
+static inline struct vio_dev *to_vio_dev(struct device *dev)
+{
+ return container_of(dev, struct vio_dev, dev);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VIO_H */
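A minimal sketch of a driver built on the interfaces above: an id table matched against the device tree, a probe that pulls an attribute, and registration through the vio_register_driver() macro. All foo_* names and the "IBM,foo" compatible string are hypothetical:

    static const struct vio_device_id foo_ids[] = {
            { "network", "IBM,foo" },
            { "", "" },
    };

    static int foo_probe(struct vio_dev *vdev, const struct vio_device_id *id)
    {
            int len;
            const void *mac = vio_get_attribute(vdev, VETH_MAC_ADDR, &len);

            return mac ? 0 : -ENODEV;
    }

    static void foo_remove(struct vio_dev *vdev)
    {
    }

    static struct vio_driver foo_driver = {
            .name     = "foo",
            .id_table = foo_ids,
            .probe    = foo_probe,
            .remove   = foo_remove,
    };

    /* In module_init():  return vio_register_driver(&foo_driver);  */
    /* In module_exit():  vio_unregister_driver(&foo_driver);       */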
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
new file mode 100644
index 000000000..4c69ece52
--- /dev/null
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_POWERPC_VMALLOC_H
+#define _ASM_POWERPC_VMALLOC_H
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return radix_enabled();
+}
+
+#endif
+
+#endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 000000000..30a12d208
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,206 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+#include <asm/extable.h>
+
+#ifdef __BIG_ENDIAN__
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long leading_zero_bits;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return ~1ul << __fls(mask);
+}
+
+#else
+
+#ifdef CONFIG_64BIT
+
+/* unused */
+struct word_at_a_time {
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { }
+
+/* This will give us 0xff for a NUL char and 0x00 elsewhere */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long ret;
+ unsigned long zero = 0;
+
+ asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
+ *bits = ret;
+
+ return ret;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ unsigned long leading_zero_bits;
+ long trailing_zero_bit_mask;
+
+ asm("addi %1,%2,-1\n\t"
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+ : "b" (bits));
+
+ return leading_zero_bits;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return mask >> 3;
+}
+
+/* This assumes that we never ask for an all 1s bitmask */
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return (1UL << mask) - 1;
+}
+
+#else /* 32-bit case */
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* __BIG_ENDIAN__ */
+
+/*
+ * We use load_unaligned_zeropad() in a selftest, which builds a userspace
+ * program. Some linker scripts seem to discard the .fixup section, so allow
+ * the test code to use a different section name.
+ */
+#ifndef FIXUP_SECTION
+#define FIXUP_SECTION ".fixup"
+#endif
+
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset, tmp;
+
+ asm(
+ "1: " PPC_LL "%[ret], 0(%[addr])\n"
+ "2:\n"
+ ".section " FIXUP_SECTION ",\"ax\"\n"
+ "3: "
+#ifdef __powerpc64__
+ "clrrdi %[tmp], %[addr], 3\n\t"
+ "clrlsldi %[offset], %[addr], 61, 3\n\t"
+ "ld %[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+ "sld %[ret], %[ret], %[offset]\n\t"
+#else
+ "srd %[ret], %[ret], %[offset]\n\t"
+#endif
+#else
+ "clrrwi %[tmp], %[addr], 2\n\t"
+ "clrlslwi %[offset], %[addr], 30, 3\n\t"
+ "lwz %[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+ "slw %[ret], %[ret], %[offset]\n\t"
+#else
+ "srw %[ret], %[ret], %[offset]\n\t"
+#endif
+#endif
+ "b 2b\n"
+ ".previous\n"
+ EX_TABLE(1b, 3b)
+ : [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
+ : [addr] "b" (addr), "m" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#undef FIXUP_SECTION
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
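The four primitives above compose into the canonical word-at-a-time string scan: has_zero() detects a zero byte somewhere in the word, prep_zero_mask()/create_zero_mask() normalize the per-endianness mask, and find_zero() converts it into a byte index. A sketch of a strlen()-style loop over an aligned string (illustrative; real users such as strscpy() also handle the unaligned tail):

    static inline size_t waat_strlen(const char *s)
    {
            const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
            const unsigned long *p = (const unsigned long *)s;
            unsigned long val, data;
            size_t len = 0;

            for (;;) {
                    val = *p++;
                    if (has_zero(val, &data, &constants)) {
                            data = prep_zero_mask(val, data, &constants);
                            data = create_zero_mask(data);
                            return len + find_zero(data);
                    }
                    len += sizeof(unsigned long);
            }
    }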
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 000000000..89090485b
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common definitions across all variants of ICP and ICS interrupt
+ * controllers.
+ */
+
+#ifndef _XICS_H
+#define _XICS_H
+
+#include <linux/interrupt.h>
+
+#define XICS_IPI 2
+#define XICS_IRQ_SPURIOUS 0
+
+/* Want a priority other than 0. Various HW issues require this. */
+#define DEFAULT_PRIORITY 5
+
+/*
+ * Mark IPIs as higher priority so we can take them inside interrupts
+ * FIXME: still true now?
+ */
+#define IPI_PRIORITY 4
+
+/* The least favored priority */
+#define LOWEST_PRIORITY 0xFF
+
+/* The number of priorities defined above */
+#define MAX_NUM_PRIORITIES 3
+
+/* Native ICP */
+#ifdef CONFIG_PPC_ICP_NATIVE
+extern int icp_native_init(void);
+extern void icp_native_flush_interrupt(void);
+extern void icp_native_cause_ipi_rm(int cpu);
+#else
+static inline int icp_native_init(void) { return -ENODEV; }
+#endif
+
+/* PAPR ICP */
+#ifdef CONFIG_PPC_ICP_HV
+int __init icp_hv_init(void);
+#else
+static inline int icp_hv_init(void) { return -ENODEV; }
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+int __init icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
+#else
+static inline int icp_opal_init(void) { return -ENODEV; }
+#endif
+
+/* ICP ops */
+struct icp_ops {
+ unsigned int (*get_irq)(void);
+ void (*eoi)(struct irq_data *d);
+ void (*set_priority)(unsigned char prio);
+ void (*teardown_cpu)(void);
+ void (*flush_ipi)(void);
+#ifdef CONFIG_SMP
+ void (*cause_ipi)(int cpu);
+ irq_handler_t ipi_action;
+#endif
+};
+
+extern const struct icp_ops *icp_ops;
+
+#ifdef CONFIG_PPC_ICS_NATIVE
+/* Native ICS */
+extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
+
+/* RTAS ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+extern int ics_rtas_init(void);
+#else
+static inline int ics_rtas_init(void) { return -ENODEV; }
+#endif
+
+/* HAL ICS */
+#ifdef CONFIG_PPC_POWERNV
+extern int ics_opal_init(void);
+#else
+static inline int ics_opal_init(void) { return -ENODEV; }
+#endif
+
+/* ICS instance, hooked up to chip_data of an irq */
+struct ics {
+ struct list_head link;
+ int (*check)(struct ics *ics, unsigned int hwirq);
+ void (*mask_unknown)(struct ics *ics, unsigned long vec);
+ long (*get_server)(struct ics *ics, unsigned long vec);
+ int (*host_match)(struct ics *ics, struct device_node *node);
+ struct irq_chip *chip;
+ char data[];
+};
+
+/* Commons */
+extern unsigned int xics_default_server;
+extern unsigned int xics_default_distrib_server;
+extern unsigned int xics_interrupt_server_size;
+extern struct irq_domain *xics_host;
+
+struct xics_cppr {
+ unsigned char stack[MAX_NUM_PRIORITIES];
+ int index;
+};
+
+DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
+
+static inline void xics_push_cppr(unsigned int vec)
+{
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+ if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
+ return;
+
+ if (vec == XICS_IPI)
+ os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
+ else
+ os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
+}
+
+static inline unsigned char xics_pop_cppr(void)
+{
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+ if (WARN_ON(os_cppr->index < 1))
+ return LOWEST_PRIORITY;
+
+ return os_cppr->stack[--os_cppr->index];
+}
+
+static inline void xics_set_base_cppr(unsigned char cppr)
+{
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+ /* we only really want to set the priority when there's
+ * just one cppr value on the stack
+ */
+ WARN_ON(os_cppr->index != 0);
+
+ os_cppr->stack[0] = cppr;
+}
+
+static inline unsigned char xics_cppr_top(void)
+{
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+ return os_cppr->stack[os_cppr->index];
+}
+
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
+
+extern void xics_init(void);
+extern void xics_setup_cpu(void);
+extern void xics_update_irq_servers(void);
+extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
+extern void xics_mask_unknown_vec(unsigned int vec);
+extern void xics_smp_probe(void);
+extern void xics_register_ics(struct ics *ics);
+extern void xics_teardown_cpu(void);
+extern void xics_kexec_teardown_cpu(int secondary);
+extern void xics_migrate_irqs_away(void);
+extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
+#ifdef CONFIG_SMP
+extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+ unsigned int strict_check);
+#else
+#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server)
+#endif
+
+
+#endif /* _XICS_H */
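xics_push_cppr() and xics_pop_cppr() bracket interrupt processing: accepting an interrupt pushes its processing priority (IPI_PRIORITY or DEFAULT_PRIORITY), and the EOI pops the stack to restore whatever priority was in force before. A simplified sketch of that pairing; icp_demo_write_cppr() is a hypothetical stand-in for a backend's CPPR register write:

    extern void icp_demo_write_cppr(unsigned char cppr);

    static void icp_demo_process(unsigned int vec)
    {
            xics_push_cppr(vec);            /* raise to the vector's priority */

            /* ... dispatch the handler for "vec" ... */

            icp_demo_write_cppr(xics_pop_cppr());   /* EOI: restore priority */
    }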
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
new file mode 100644
index 000000000..cf8bb6ac4
--- /dev/null
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_XIVE_REGS_H
+#define _ASM_POWERPC_XIVE_REGS_H
+
+/*
+ * "magic" Event State Buffer (ESB) MMIO offsets.
+ *
+ * Each interrupt source has a 2-bit state machine called an ESB
+ * which can be controlled by MMIO. Its two bits are P and Q:
+ * P indicates that an interrupt is pending (has been sent
+ * to a queue and is waiting for an EOI). Q indicates that the
+ * interrupt has been triggered again while pending.
+ *
+ * This acts as a coalescing mechanism in order to guarantee
+ * that a given interrupt only occurs at most once in a queue.
+ *
+ * When doing an EOI, the Q bit will indicate if the interrupt
+ * needs to be re-triggered.
+ *
+ * The following offsets into the ESB MMIO allow the PQ bits to be
+ * read or manipulated. They must be used with an 8-byte
+ * load instruction. They all return the previous state of the
+ * interrupt (atomically).
+ *
+ * Additionally, some ESB pages support doing an EOI via a
+ * store at 0 and some ESBs support doing a trigger via a
+ * separate trigger page.
+ */
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
+
+/*
+ * Load-after-store ordering
+ *
+ * Adding this offset to the load address will enforce
+ * load-after-store ordering. This is required to use StoreEOI.
+ */
+#define XIVE_ESB_LD_ST_MO 0x40 /* Load-after-store ordering */
+
+#define XIVE_ESB_VAL_P 0x2
+#define XIVE_ESB_VAL_Q 0x1
+#define XIVE_ESB_INVALID 0xFF
+
+/*
+ * Thread Management (aka "TM") registers
+ */
+
+/* TM register offsets */
+#define TM_QW0_USER 0x000 /* All rings */
+#define TM_QW1_OS 0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL 0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS 0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW QW0 QW1 QW2 QW3 */
+#define TM_NSR 0x0 /* + + - + */
+#define TM_CPPR 0x1 /* - + - + */
+#define TM_IPB 0x2 /* - + + + */
+#define TM_LSMFB 0x3 /* - + + + */
+#define TM_ACK_CNT 0x4 /* - + - - */
+#define TM_INC 0x5 /* - + - + */
+#define TM_AGE 0x6 /* - + - + */
+#define TM_PIPR 0x7 /* - + - + */
+
+#define TM_WORD0 0x0
+#define TM_WORD1 0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2 0x8
+#define TM_QW0W2_VU PPC_BIT32(0)
+#define TM_QW0W2_LOGIC_SERV PPC_BITMASK32(1,31) // XX 2,31 ?
+#define TM_QW1W2_VO PPC_BIT32(0)
+#define TM_QW1W2_HO PPC_BIT32(1) /* P10 XIVE2 */
+#define TM_QW1W2_OS_CAM PPC_BITMASK32(8,31)
+#define TM_QW2W2_VP PPC_BIT32(0)
+#define TM_QW2W2_HP PPC_BIT32(1) /* P10 XIVE2 */
+#define TM_QW2W2_POOL_CAM PPC_BITMASK32(8,31)
+#define TM_QW3W2_VT PPC_BIT32(0)
+#define TM_QW3W2_HT PPC_BIT32(1) /* P10 XIVE2 */
+#define TM_QW3W2_LP PPC_BIT32(6)
+#define TM_QW3W2_LE PPC_BIT32(7)
+#define TM_QW3W2_T PPC_BIT32(31)
+
+/*
+ * In addition to normal loads to "peek" and writes (only when invalid)
+ * using 4 and 8 bytes accesses, the above registers support these
+ * "special" byte operations:
+ *
+ * - Byte load from QW0[NSR] - User level NSR (EBB)
+ * - Byte store to QW0[NSR] - User level NSR (EBB)
+ * - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
+ * - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
+ * otherwise VT||0000000
+ * - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
+ *
+ * Then we have all these "special" CI ops at these offset that trigger
+ * all sorts of side effects:
+ */
+#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/
+#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
+#define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */
+#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user context */
+#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS context to reg */
+#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool context to reg*/
+#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd line */
+#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
+#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even line */
+#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
+/* XXX more... */
+
+/* NSR fields for the various QW ack types */
+#define TM_QW0_NSR_EB PPC_BIT8(0)
+#define TM_QW1_NSR_EO PPC_BIT8(0)
+#define TM_QW3_NSR_HE PPC_BITMASK8(0,1)
+#define TM_QW3_NSR_HE_NONE 0
+#define TM_QW3_NSR_HE_POOL 1
+#define TM_QW3_NSR_HE_PHYS 2
+#define TM_QW3_NSR_HE_LSI 3
+#define TM_QW3_NSR_I PPC_BIT8(2)
+#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3,7)
+
+#endif /* _ASM_POWERPC_XIVE_REGS_H */
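A sketch of driving the PQ state machine through the ESB offsets above, modelled on what the in-kernel XIVE backends do; esb_mmio stands for the source's mapped ESB page, and the 8-byte-wide load uses in_be64() as the backends do:

    static u8 esb_read_pq(void __iomem *esb_mmio)
    {
            /* non-destructive read; previous PQ comes back in the low bits */
            return (u8)in_be64(esb_mmio + XIVE_ESB_GET);
    }

    static u8 esb_mask_source(void __iomem *esb_mmio)
    {
            /* PQ = 11 leaves the source masked; the old state is returned */
            return (u8)in_be64(esb_mmio + XIVE_ESB_SET_PQ_11);
    }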
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
new file mode 100644
index 000000000..92930b0b5
--- /dev/null
+++ b/arch/powerpc/include/asm/xive.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_XIVE_H
+#define _ASM_POWERPC_XIVE_H
+
+#include <asm/opal-api.h>
+
+#define XIVE_INVALID_VP 0xffffffff
+
+#ifdef CONFIG_PPC_XIVE
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * This is a global MMIO region divided into 4 pages of varying access
+ * permissions, providing access to per-cpu interrupt management
+ * functions. It always identifies the CPU doing the access based
+ * on the PowerBus initiator ID, thus we always access via the
+ * same offset regardless of where the code is executing.
+ */
+extern void __iomem *xive_tima;
+extern unsigned long xive_tima_os;
+
+/*
+ * Offset in the TM area of our current execution level (provided by
+ * the backend)
+ */
+extern u32 xive_tima_offset;
+
+/*
+ * Per-irq data (irq_get_handler_data for normal IRQs); IPIs
+ * have it stored in the xive_cpu structure. We also cache
+ * for normal interrupts the current target CPU.
+ *
+ * This structure is setup by the backend for each interrupt.
+ */
+struct xive_irq_data {
+ u64 flags;
+ u64 eoi_page;
+ void __iomem *eoi_mmio;
+ u64 trig_page;
+ void __iomem *trig_mmio;
+ u32 esb_shift;
+ int src_chip;
+ u32 hw_irq;
+
+ /* Setup/used by frontend */
+ int target;
+ /*
+ * saved_p means that there is a queue entry for this interrupt
+ * in some CPU's queue (not including guest vcpu queues), even
+ * if P is not set in the source ESB.
+ * stale_p means that there is no queue entry for this interrupt
+ * in some CPU's queue, even if P is set in the source ESB.
+ */
+ bool saved_p;
+ bool stale_p;
+};
+#define XIVE_IRQ_FLAG_STORE_EOI 0x01
+#define XIVE_IRQ_FLAG_LSI 0x02
+/* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */
+/* #define XIVE_IRQ_FLAG_MASK_FW 0x08 */ /* P9 DD1.0 workaround */
+/* #define XIVE_IRQ_FLAG_EOI_FW 0x10 */ /* P9 DD1.0 workaround */
+#define XIVE_IRQ_FLAG_H_INT_ESB 0x20
+
+/* Special flag set by KVM for escalation interrupts */
+#define XIVE_IRQ_FLAG_NO_EOI 0x80
+
+#define XIVE_INVALID_CHIP_ID -1
+
+/* A queue tracking structure in a CPU */
+struct xive_q {
+ __be32 *qpage;
+ u32 msk;
+ u32 idx;
+ u32 toggle;
+ u64 eoi_phys;
+ u32 esc_irq;
+ atomic_t count;
+ atomic_t pending_count;
+ u64 guest_qaddr;
+ u32 guest_qshift;
+};
+
+/* Global enable flags for the XIVE support */
+extern bool __xive_enabled;
+
+static inline bool xive_enabled(void) { return __xive_enabled; }
+
+bool xive_spapr_init(void);
+bool xive_native_init(void);
+void xive_smp_probe(void);
+int xive_smp_prepare_cpu(unsigned int cpu);
+void xive_smp_setup_cpu(void);
+void xive_smp_disable_cpu(void);
+void xive_teardown_cpu(void);
+void xive_shutdown(void);
+void xive_flush_interrupt(void);
+
+/* xmon hook */
+void xmon_xive_do_dump(int cpu);
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_get_irq_all(void);
+
+/* APIs used by KVM */
+u32 xive_native_default_eq_shift(void);
+u32 xive_native_alloc_vp_block(u32 max_vcpus);
+void xive_native_free_vp_block(u32 vp_base);
+int xive_native_populate_irq_data(u32 hw_irq,
+ struct xive_irq_data *data);
+void xive_cleanup_irq_data(struct xive_irq_data *xd);
+void xive_irq_free_data(unsigned int virq);
+void xive_native_free_irq(u32 irq);
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+ __be32 *qpage, u32 order, bool can_escalate);
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+void xive_native_sync_source(u32 hw_irq);
+void xive_native_sync_queue(u32 hw_irq);
+bool is_xive_irq(struct irq_chip *chip);
+int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+int xive_native_disable_vp(u32 vp_id);
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+bool xive_native_has_single_escalation(void);
+bool xive_native_has_save_restore(void);
+
+int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+ u64 *out_qpage,
+ u64 *out_qsize,
+ u64 *out_qeoi_page,
+ u32 *out_escalate_irq,
+ u64 *out_qflags);
+
+int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+ u32 *qindex);
+int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+ u32 qindex);
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+bool xive_native_has_queue_state_support(void);
+extern u32 xive_native_alloc_irq_on_chip(u32 chip_id);
+
+static inline u32 xive_native_alloc_irq(void)
+{
+ return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
+}
+
+#else
+
+static inline bool xive_enabled(void) { return false; }
+
+static inline bool xive_spapr_init(void) { return false; }
+static inline bool xive_native_init(void) { return false; }
+static inline void xive_smp_probe(void) { }
+static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
+static inline void xive_smp_setup_cpu(void) { }
+static inline void xive_smp_disable_cpu(void) { }
+static inline void xive_shutdown(void) { }
+static inline void xive_flush_interrupt(void) { }
+
+static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
+static inline void xive_native_free_vp_block(u32 vp_base) { }
+
+#endif
+
+#endif /* _ASM_POWERPC_XIVE_H */
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
new file mode 100644
index 000000000..f2d44b44f
--- /dev/null
+++ b/arch/powerpc/include/asm/xmon.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_POWERPC_XMON_H
+#define __ASM_POWERPC_XMON_H
+
+/*
+ * Copyright (C) 2006 IBM Corp
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/irqreturn.h>
+
+#ifdef CONFIG_XMON
+extern void xmon_setup(void);
+void __init xmon_register_spus(struct list_head *list);
+struct pt_regs;
+extern int xmon(struct pt_regs *excp);
+extern irqreturn_t xmon_irq(int, void *);
+#else
+static inline void xmon_setup(void) { }
+static inline void xmon_register_spus(struct list_head *list) { }
+#endif
+
+#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
+extern int cpus_are_in_xmon(void);
+#endif
+
+extern __printf(1, 2) void xmon_printf(const char *format, ...);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_XMON_H */
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
new file mode 100644
index 000000000..37d05c11d
--- /dev/null
+++ b/arch/powerpc/include/asm/xor.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#ifndef _ASM_POWERPC_XOR_H
+#define _ASM_POWERPC_XOR_H
+
+#ifdef CONFIG_ALTIVEC
+
+#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/xor_altivec.h>
+
+static struct xor_block_template xor_block_altivec = {
+ .name = "altivec",
+ .do_2 = xor_altivec_2,
+ .do_3 = xor_altivec_3,
+ .do_4 = xor_altivec_4,
+ .do_5 = xor_altivec_5,
+};
+
+#define XOR_SPEED_ALTIVEC() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) \
+ xor_speed(&xor_block_altivec); \
+ } while (0)
+#else
+#define XOR_SPEED_ALTIVEC()
+#endif
+
+/* Also try the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ XOR_SPEED_ALTIVEC(); \
+} while (0)
+
+#endif /* _ASM_POWERPC_XOR_H */
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
new file mode 100644
index 000000000..294620a25
--- /dev/null
+++ b/arch/powerpc/include/asm/xor_altivec.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_XOR_ALTIVEC_H
+#define _ASM_POWERPC_XOR_ALTIVEC_H
+
+#ifdef CONFIG_ALTIVEC
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
+
+#endif
+#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */
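A sketch of using the AltiVec routine declared above with the same runtime guard xor.h applies, falling back to a scalar loop when the CPU lacks AltiVec. Illustrative only; buffers are assumed suitably aligned and "bytes" a multiple of the vector width:

    static void xor_two(unsigned long *dst, const unsigned long *src,
                        unsigned long bytes)
    {
            unsigned long i;

            if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                    xor_altivec_2(bytes, dst, src);         /* dst ^= src */
            } else {
                    for (i = 0; i < bytes / sizeof(unsigned long); i++)
                            dst[i] ^= src[i];
            }
    }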
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..353b70b19
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += unistd_32.h
+generated-y += unistd_64.h
diff --git a/arch/powerpc/include/uapi/asm/auxvec.h b/arch/powerpc/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..aa7c16215
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/auxvec.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_AUXVEC_H
+#define _ASM_POWERPC_AUXVEC_H
+
+/*
+ * We need to put in some extra aux table entries to tell glibc what
+ * the cache block size is, so it can use the dcbz instruction safely.
+ */
+#define AT_DCACHEBSIZE 19
+#define AT_ICACHEBSIZE 20
+#define AT_UCACHEBSIZE 21
+/* A special ignored type value for PPC, for glibc compatibility. */
+#define AT_IGNOREPPC 22
+
+/* The vDSO location. We have to use the same value as x86 for glibc's
+ * sake :-)
+ */
+#define AT_SYSINFO_EHDR 33
+
+/*
+ * AT_*CACHEBSIZE above represent the cache *block* size which is
+ * the size that is affected by the cache management instructions.
+ *
+ * It doesn't necessarily match the cache *line* size, which is
+ * more of a performance tuning hint. Additionally, the latter can
+ * differ between cache levels.
+ *
+ * The set of entries below represent more extensive information
+ * about the caches, in the form of two entries per cache type,
+ * one entry containing the cache size in bytes, and the other
+ * containing the cache line size in bytes in the bottom 16 bits
+ * and the cache associativity in the next 16 bits.
+ *
+ * The associativity is such that if N is the 16-bit value, the
+ * cache is N-way set associative. A value of 0xffff means fully
+ * associative, a value of 1 means direct-mapped.
+ *
+ * For all these fields, a value of 0 means that the information
+ * is not known.
+ */
+
+#define AT_L1I_CACHESIZE 40
+#define AT_L1I_CACHEGEOMETRY 41
+#define AT_L1D_CACHESIZE 42
+#define AT_L1D_CACHEGEOMETRY 43
+#define AT_L2_CACHESIZE 44
+#define AT_L2_CACHEGEOMETRY 45
+#define AT_L3_CACHESIZE 46
+#define AT_L3_CACHEGEOMETRY 47
+
+#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */
+
+#define AT_VECTOR_SIZE_ARCH 15 /* entries in ARCH_DLINFO */
+
+#endif
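A userspace sketch decoding the cache-geometry encoding described above (line size in the bottom 16 bits, associativity in the next 16). Linux-only; getauxval() comes from <sys/auxv.h>, and the fallback define mirrors the value above for older libcs:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_L1D_CACHEGEOMETRY
    #define AT_L1D_CACHEGEOMETRY 43
    #endif

    int main(void)
    {
            unsigned long geom = getauxval(AT_L1D_CACHEGEOMETRY);
            unsigned int line  = geom & 0xffff;
            unsigned int assoc = (geom >> 16) & 0xffff;

            if (!geom)
                    puts("L1D geometry unknown");
            else if (assoc == 0xffff)
                    printf("L1D: %u-byte lines, fully associative\n", line);
            else
                    printf("L1D: %u-byte lines, %u-way set associative\n",
                           line, assoc);
            return 0;
    }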
diff --git a/arch/powerpc/include/uapi/asm/bitsperlong.h b/arch/powerpc/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..46ece3ecf
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_POWERPC_BITSPERLONG_H
+#define __ASM_POWERPC_BITSPERLONG_H
+
+#if defined(__powerpc64__)
+# define __BITS_PER_LONG 64
+#else
+# define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_POWERPC_BITSPERLONG_H */
diff --git a/arch/powerpc/include/uapi/asm/bootx.h b/arch/powerpc/include/uapi/asm/bootx.h
new file mode 100644
index 000000000..6728c7e24
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/bootx.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file describes the structure passed from the BootX application
+ * (for MacOS) when it is used to boot Linux.
+ *
+ * Written by Benjamin Herrenschmidt.
+ */
+
+
+#ifndef _UAPI__ASM_BOOTX_H__
+#define _UAPI__ASM_BOOTX_H__
+
+#include <linux/types.h>
+
+#ifdef macintosh
+#include <Types.h>
+#include "linux_type_defs.h"
+#endif
+
+#ifdef macintosh
+/* All this requires PowerPC alignment */
+#pragma options align=power
+#endif
+
+/* On kernel entry:
+ *
+ * r3 = 0x426f6f58 ('BooX')
+ * r4 = pointer to boot_infos
+ * r5 = NULL
+ *
+ * Data and instruction translation disabled, interrupts
+ * disabled, kernel loaded at physical 0x00000000 on PCI
+ * machines (will be different on NuBus).
+ */
+
+#define BOOT_INFO_VERSION 5
+#define BOOT_INFO_COMPATIBLE_VERSION 1
+
+/* Bits in the architecture flag mask. More may be defined in
+   future versions. Note that exactly one of BOOT_ARCH_PCI or
+   BOOT_ARCH_NUBUS is set. The other BOOT_ARCH_NUBUS_xxx bits are
+   set additionally when BOOT_ARCH_NUBUS is set.
+ */
+#define BOOT_ARCH_PCI 0x00000001UL
+#define BOOT_ARCH_NUBUS 0x00000002UL
+#define BOOT_ARCH_NUBUS_PDM 0x00000010UL
+#define BOOT_ARCH_NUBUS_PERFORMA 0x00000020UL
+#define BOOT_ARCH_NUBUS_POWERBOOK 0x00000040UL
+
+/* Maximum number of ranges in phys memory map */
+#define MAX_MEM_MAP_SIZE 26
+
+/* This is the format of an element in the physical memory map. Note that
+ the map is optional and current BootX will only build it for pre-PCI
+ machines */
+typedef struct boot_info_map_entry
+{
+ __u32 physAddr; /* Physical starting address */
+ __u32 size; /* Size in bytes */
+} boot_info_map_entry_t;
+
+
+/* Here is the boot information that is passed to the bootstrap.
+ * Note that the kernel arguments and the device tree are appended
+ * at the end of this structure. */
+typedef struct boot_infos
+{
+ /* Version of this structure */
+ __u32 version;
+ /* backward compatible down to version: */
+ __u32 compatible_version;
+
+ /* NEW (vers. 2) this holds the current _logical_ base addr of
+ the frame buffer (for use by early boot message) */
+ __u8* logicalDisplayBase;
+
+ /* NEW (vers. 4) Apple's machine identification */
+ __u32 machineID;
+
+ /* NEW (vers. 4) Detected hw architecture */
+ __u32 architecture;
+
+ /* The device tree (internal addresses relative to the beginning of the tree,
+ * device tree offset relative to the beginning of this structure).
+ * On pre-PCI macintosh (BOOT_ARCH_PCI bit set to 0 in architecture), this
+ * field is 0.
+ */
+ __u32 deviceTreeOffset; /* Device tree offset */
+ __u32 deviceTreeSize; /* Size of the device tree */
+
+ /* Some infos about the current MacOS display */
+ __u32 dispDeviceRect[4]; /* left,top,right,bottom */
+ __u32 dispDeviceDepth; /* (8, 16 or 32) */
+ __u8* dispDeviceBase; /* base address (physical) */
+ __u32 dispDeviceRowBytes; /* rowbytes (in bytes) */
+ __u32 dispDeviceColorsOffset; /* Colormap (8 bits only) or 0 (*) */
+ /* Optional offset in the registry to the current
+ * MacOS display. (Can be 0 when not detected) */
+ __u32 dispDeviceRegEntryOffset;
+
+ /* Optional pointer to boot ramdisk (offset from this structure) */
+ __u32 ramDisk;
+ __u32 ramDiskSize; /* size of ramdisk image */
+
+ /* Kernel command line arguments (offset from this structure) */
+ __u32 kernelParamsOffset;
+
+ /* ALL BELOW NEW (vers. 4) */
+
+	/* This defines the physical memory. Valid with the BOOT_ARCH_NUBUS flag
+	   (non-PCI) only. On PCI, memory is contiguous and its size is in the
+ device-tree. */
+ boot_info_map_entry_t
+ physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
+ __u32 physMemoryMapSize; /* How many entries in map */
+
+
+ /* The framebuffer size (optional, currently 0) */
+ __u32 frameBufferSize; /* Represents a max size, can be 0. */
+
+ /* NEW (vers. 5) */
+
+ /* Total params size (args + colormap + device tree + ramdisk) */
+ __u32 totalParamsSize;
+
+} boot_infos_t;
+
+
+#ifdef macintosh
+#pragma options align=reset
+#endif
+
+#endif /* _UAPI__ASM_BOOTX_H__ */
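Several boot_infos_t fields (device tree, ramdisk, kernel arguments) are offsets relative to the start of the structure rather than pointers. A sketch of how an early-boot consumer would resolve them; the helper name is illustrative:

    static void *boot_infos_ptr(boot_infos_t *bi, __u32 offset)
    {
            return offset ? (char *)bi + offset : NULL;
    }

    /* e.g.  device tree:  boot_infos_ptr(bi, bi->deviceTreeOffset);
     *       kernel args:  boot_infos_ptr(bi, bi->kernelParamsOffset);  */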
diff --git a/arch/powerpc/include/uapi/asm/byteorder.h b/arch/powerpc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..8ef66f7d9
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/byteorder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
+#include <linux/byteorder/big_endian.h>
+#endif
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
new file mode 100644
index 000000000..731b97dc2
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_POWERPC_CPUTABLE_H
+#define _UAPI__ASM_POWERPC_CPUTABLE_H
+
+/* in AT_HWCAP */
+#define PPC_FEATURE_32 0x80000000
+#define PPC_FEATURE_64 0x40000000
+#define PPC_FEATURE_601_INSTR 0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+#define PPC_FEATURE_HAS_FPU 0x08000000
+#define PPC_FEATURE_HAS_MMU 0x04000000
+#define PPC_FEATURE_HAS_4xxMAC 0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
+#define PPC_FEATURE_HAS_SPE 0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
+#define PPC_FEATURE_NO_TB 0x00100000
+#define PPC_FEATURE_POWER4 0x00080000
+#define PPC_FEATURE_POWER5 0x00040000
+#define PPC_FEATURE_POWER5_PLUS 0x00020000
+#define PPC_FEATURE_CELL 0x00010000
+#define PPC_FEATURE_BOOKE 0x00008000
+#define PPC_FEATURE_SMT 0x00004000
+#define PPC_FEATURE_ICACHE_SNOOP 0x00002000
+#define PPC_FEATURE_ARCH_2_05 0x00001000
+#define PPC_FEATURE_PA6T 0x00000800
+#define PPC_FEATURE_HAS_DFP 0x00000400
+#define PPC_FEATURE_POWER6_EXT 0x00000200
+#define PPC_FEATURE_ARCH_2_06 0x00000100
+#define PPC_FEATURE_HAS_VSX 0x00000080
+
+#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 0x00000040
+
+/* Reserved - do not use 0x00000004 */
+#define PPC_FEATURE_TRUE_LE 0x00000002
+#define PPC_FEATURE_PPC_LE 0x00000001
+
+/* in AT_HWCAP2 */
+#define PPC_FEATURE2_ARCH_2_07 0x80000000
+#define PPC_FEATURE2_HTM 0x40000000
+#define PPC_FEATURE2_DSCR 0x20000000
+#define PPC_FEATURE2_EBB 0x10000000
+#define PPC_FEATURE2_ISEL 0x08000000
+#define PPC_FEATURE2_TAR 0x04000000
+#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
+#define PPC_FEATURE2_HTM_NOSC 0x01000000
+#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
+#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000 /* TM w/out suspended state */
+#define PPC_FEATURE2_ARCH_3_1 0x00040000 /* ISA 3.1 */
+#define PPC_FEATURE2_MMA 0x00020000 /* Matrix Multiply Assist */
+
+/*
+ * IMPORTANT!
+ * All future PPC_FEATURE definitions should be allocated in cooperation with
+ * OPAL / skiboot firmware, in accordance with the ibm,powerpc-cpu-features
+ * device tree binding.
+ */
+
+#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
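A userspace sketch of consuming these bits the usual way, through the AT_HWCAP/AT_HWCAP2 aux vector entries. Linux-only; the raw hex values duplicate the definitions above so the example does not depend on this uapi header:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            unsigned long hwcap  = getauxval(AT_HWCAP);
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            if (hwcap & 0x10000000)         /* PPC_FEATURE_HAS_ALTIVEC */
                    puts("AltiVec available");
            if (hwcap2 & 0x00800000)        /* PPC_FEATURE2_ARCH_3_00 */
                    puts("ISA 3.00 (POWER9) or later");
            return 0;
    }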
diff --git a/arch/powerpc/include/uapi/asm/eeh.h b/arch/powerpc/include/uapi/asm/eeh.h
new file mode 100644
index 000000000..28186071f
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/eeh.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2015
+ *
+ * Authors: Gavin Shan <gwshan@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_POWERPC_EEH_H
+#define _ASM_POWERPC_EEH_H
+
+/* PE states */
+#define EEH_PE_STATE_NORMAL 0 /* Normal state */
+#define EEH_PE_STATE_RESET 1 /* PE reset asserted */
+#define EEH_PE_STATE_STOPPED_IO_DMA 2 /* Frozen PE */
+#define EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA only */
+#define EEH_PE_STATE_UNAVAIL 5 /* Unavailable */
+
+/* EEH error types and functions */
+#define EEH_ERR_TYPE_32		0	/* 32-bit error	*/
+#define EEH_ERR_TYPE_64		1	/* 64-bit error	*/
+#define EEH_ERR_FUNC_MIN 0
+#define EEH_ERR_FUNC_LD_MEM_ADDR 0 /* Memory load */
+#define EEH_ERR_FUNC_LD_MEM_DATA 1
+#define EEH_ERR_FUNC_LD_IO_ADDR 2 /* IO load */
+#define EEH_ERR_FUNC_LD_IO_DATA 3
+#define EEH_ERR_FUNC_LD_CFG_ADDR 4 /* Config load */
+#define EEH_ERR_FUNC_LD_CFG_DATA 5
+#define EEH_ERR_FUNC_ST_MEM_ADDR 6 /* Memory store */
+#define EEH_ERR_FUNC_ST_MEM_DATA 7
+#define EEH_ERR_FUNC_ST_IO_ADDR 8 /* IO store */
+#define EEH_ERR_FUNC_ST_IO_DATA 9
+#define EEH_ERR_FUNC_ST_CFG_ADDR 10 /* Config store */
+#define EEH_ERR_FUNC_ST_CFG_DATA 11
+#define EEH_ERR_FUNC_DMA_RD_ADDR 12 /* DMA read */
+#define EEH_ERR_FUNC_DMA_RD_DATA 13
+#define EEH_ERR_FUNC_DMA_RD_MASTER 14
+#define EEH_ERR_FUNC_DMA_RD_TARGET 15
+#define EEH_ERR_FUNC_DMA_WR_ADDR 16 /* DMA write */
+#define EEH_ERR_FUNC_DMA_WR_DATA 17
+#define EEH_ERR_FUNC_DMA_WR_MASTER 18
+#define EEH_ERR_FUNC_DMA_WR_TARGET 19
+#define EEH_ERR_FUNC_MAX 19
+
+#endif /* _ASM_POWERPC_EEH_H */
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
new file mode 100644
index 000000000..308857123
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * ELF register definitions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_ELF_H
+#define _UAPI_ASM_POWERPC_ELF_H
+
+
+#include <linux/types.h>
+
+#include <asm/ptrace.h>
+#include <asm/cputable.h>
+#include <asm/auxvec.h>
+
+/* PowerPC relocations defined by the ABIs */
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/* PowerPC relocations defined for the TLS access ABI. */
+#define R_PPC_TLS 67 /* none (sym+add)@tls */
+#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
+#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
+#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
+#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
+#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
+#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
+#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
+#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
+#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
+
+/* keep this the last entry. */
+#define R_PPC_NUM 95
+
+
+#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
+#define ELF_NFPREG 33 /* includes fpscr */
+#define ELF_NVMX 34 /* includes all vector registers */
+#define ELF_NVSX 32 /* includes all VSX registers */
+#define ELF_NTMSPRREG 3 /* include tfhar, tfiar, texasr */
+#define ELF_NEBB 3 /* includes ebbrr, ebbhr, bescr */
+#define ELF_NPMU 5 /* includes siar, sdar, sier, mmcr2, mmcr0 */
+#define ELF_NPKEY 3 /* includes amr, iamr, uamor */
+
+typedef unsigned long elf_greg_t64;
+typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
+
+typedef unsigned int elf_greg_t32;
+typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
+typedef elf_gregset_t32 compat_elf_gregset_t;
+
+/*
+ * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
+ */
+#ifdef __powerpc64__
+# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
+# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
+# define ELF_NVSRHALFREG 32 /* Half the vsx registers */
+# define ELF_GREG_TYPE elf_greg_t64
+# define ELF_ARCH EM_PPC64
+# define ELF_CLASS ELFCLASS64
+typedef elf_greg_t64 elf_greg_t;
+typedef elf_gregset_t64 elf_gregset_t;
+#else
+# define ELF_NEVRREG 34 /* includes acc (as 2) */
+# define ELF_NVRREG 33 /* includes vscr */
+# define ELF_GREG_TYPE elf_greg_t32
+# define ELF_ARCH EM_PPC
+# define ELF_CLASS ELFCLASS32
+typedef elf_greg_t32 elf_greg_t;
+typedef elf_gregset_t32 elf_gregset_t;
+#endif /* __powerpc64__ */
+
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/* Floating point registers */
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* Altivec registers */
+/*
+ * The entries with indexes 0-31 contain the corresponding vector registers.
+ * The entry with index 32 contains the vscr as the last word (offset 12)
+ * within the quadword. This allows the vscr to be stored as either a
+ * quadword (since it must be copied via a vector register to/from storage)
+ * or as a word.
+ *
+ * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
+ * word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ *
+ * Note that it's _not_ compatible with the 32-bit ucontext, which stuffs the
+ * vrsave along with the vscr and so only uses 33 vectors for the register set.
+ */
+typedef __vector128 elf_vrreg_t;
+typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
+#ifdef __powerpc64__
+typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+#endif
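
As an aside, a minimal sketch (mine, not part of the header) of how the layout
described in the comment above is typically consumed: the vscr is the last
32-bit word (offset 12) of entry 32, and on 64-bit the vrsave is the first word
of entry 33. The helper names are hypothetical; word index 3 assumes the
big-endian quadword layout the comment describes.

/* Hedged sketch: extract vscr/vrsave from an elf_vrregset_t laid out as
 * documented above. */
static inline unsigned int vrregs_vscr(const elf_vrreg_t *regs)
{
	/* vscr lives at offset 12 of entry 32, i.e. word index 3 */
	return ((const unsigned int *)&regs[32])[3];
}

#ifdef __powerpc64__
static inline unsigned int vrregs_vrsave(const elf_vrreg_t *regs)
{
	/* vrsave is the first word (offset 0) of entry 33 */
	return ((const unsigned int *)&regs[33])[0];
}
#endif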
+
+/* PowerPC64 relocations defined by the ABIs */
+#define R_PPC64_NONE R_PPC_NONE
+#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address. */
+#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned. */
+#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address. */
+#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of abs. address. */
+#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of abs. address. */
+#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
+#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned. */
+#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
+#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
+#define R_PPC64_REL24 R_PPC_REL24 /* PC relative 26 bit, word aligned. */
+#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit. */
+#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
+#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
+#define R_PPC64_GOT16 R_PPC_GOT16
+#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
+#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
+#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
+
+#define R_PPC64_COPY R_PPC_COPY
+#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
+#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
+#define R_PPC64_RELATIVE R_PPC_RELATIVE
+
+#define R_PPC64_UADDR32 R_PPC_UADDR32
+#define R_PPC64_UADDR16 R_PPC_UADDR16
+#define R_PPC64_REL32 R_PPC_REL32
+#define R_PPC64_PLT32 R_PPC_PLT32
+#define R_PPC64_PLTREL32 R_PPC_PLTREL32
+#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
+#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
+#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
+
+#define R_PPC64_SECTOFF R_PPC_SECTOFF
+#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
+#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
+#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
+#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2. */
+#define R_PPC64_ADDR64 38 /* doubleword64 S + A. */
+#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A). */
+#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A). */
+#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A). */
+#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A). */
+#define R_PPC64_UADDR64 43 /* doubleword64 S + A. */
+#define R_PPC64_REL64 44 /* doubleword64 S + A - P. */
+#define R_PPC64_PLT64 45 /* doubleword64 L + A. */
+#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P. */
+#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */
+#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.). */
+#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.). */
+#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.). */
+#define R_PPC64_TOC 51 /* doubleword64 .TOC. */
+#define R_PPC64_PLTGOT16 52 /* half16* M + A. */
+#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A). */
+#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A). */
+#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A). */
+
+#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2. */
+#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2. */
+#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2. */
+#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2. */
+#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2. */
+#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2. */
+#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2. */
+#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2. */
+#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */
+#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */
+#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */
+
+/* PowerPC64 relocations defined for the TLS access ABI. */
+#define R_PPC64_TLS 67 /* none (sym+add)@tls */
+#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
+#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
+#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
+#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
+#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
+#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
+#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
+#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
+#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
+#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
+#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
+#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
+#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
+#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
+#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
+#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
+#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
+#define R_PPC64_TLSGD 107
+#define R_PPC64_TLSLD 108
+#define R_PPC64_TOCSAVE 109
+
+#define R_PPC64_ENTRY 118
+
+#define R_PPC64_REL16 249
+#define R_PPC64_REL16_LO 250
+#define R_PPC64_REL16_HI 251
+#define R_PPC64_REL16_HA 252
+
+/* Keep this the last entry. */
+#define R_PPC64_NUM 253
+
+#endif /* _UAPI_ASM_POWERPC_ELF_H */
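
For readers decoding the half16 operators that appear throughout the relocation
comments above, here is a sketch (not from the kernel) of the arithmetic behind
@l/@h/@ha and #higher/#highest, following the PowerPC ELF ABI: @ha adds a carry
so that (ha << 16) + (signed short)lo reproduces the low 32 bits of the value.

/* Hedged sketch of the half16 extraction operators referenced above. */
static inline unsigned short ppc_lo(unsigned long long v)
{
	return v & 0xffff;
}
static inline unsigned short ppc_hi(unsigned long long v)
{
	return (v >> 16) & 0xffff;
}
static inline unsigned short ppc_ha(unsigned long long v)
{
	/* carry-adjusted high half: compensates for @l being sign-extended */
	return ((v >> 16) + ((v & 0x8000) ? 1 : 0)) & 0xffff;
}
static inline unsigned short ppc_higher(unsigned long long v)
{
	return (v >> 32) & 0xffff;	/* bits 32-47 */
}
static inline unsigned short ppc_highest(unsigned long long v)
{
	return (v >> 48) & 0xffff;	/* bits 48-63 */
}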
diff --git a/arch/powerpc/include/uapi/asm/epapr_hcalls.h b/arch/powerpc/include/uapi/asm/epapr_hcalls.h
new file mode 100644
index 000000000..90a0ee6d0
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/epapr_hcalls.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
+#define _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
+
+#define EV_BYTE_CHANNEL_SEND 1
+#define EV_BYTE_CHANNEL_RECEIVE 2
+#define EV_BYTE_CHANNEL_POLL 3
+#define EV_INT_SET_CONFIG 4
+#define EV_INT_GET_CONFIG 5
+#define EV_INT_SET_MASK 6
+#define EV_INT_GET_MASK 7
+#define EV_INT_IACK 9
+#define EV_INT_EOI 10
+#define EV_INT_SEND_IPI 11
+#define EV_INT_SET_TASK_PRIORITY 12
+#define EV_INT_GET_TASK_PRIORITY 13
+#define EV_DOORBELL_SEND 14
+#define EV_MSGSND 15
+#define EV_IDLE 16
+
+/* vendor ID: epapr */
+#define EV_LOCAL_VENDOR_ID 0 /* for private use */
+#define EV_EPAPR_VENDOR_ID 1
+#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
+#define EV_IBM_VENDOR_ID 3 /* IBM */
+#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
+#define EV_ENEA_VENDOR_ID 5 /* Enea */
+#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
+#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
+#define EV_KVM_VENDOR_ID 42 /* KVM */
+
+/* The max number of bytes that a byte channel can send or receive per call */
+#define EV_BYTE_CHANNEL_MAX_BYTES 16
+
+
+#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
+#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
+
+/* epapr return codes */
+#define EV_SUCCESS 0
+#define EV_EPERM 1 /* Operation not permitted */
+#define EV_ENOENT 2 /* Entry Not Found */
+#define EV_EIO 3 /* I/O error occurred */
+#define EV_EAGAIN 4 /* The operation had insufficient
+ * resources to complete and should be
+ * retried
+ */
+#define EV_ENOMEM 5 /* There was insufficient memory to
+ * complete the operation */
+#define EV_EFAULT 6 /* Bad guest address */
+#define EV_ENODEV 7 /* No such device */
+#define EV_EINVAL		8	/* An argument supplied to the hcall
+					 * was out of range or invalid */
+#define EV_INTERNAL 9 /* An internal error occurred */
+#define EV_CONFIG 10 /* A configuration error was detected */
+#define EV_INVALID_STATE 11 /* The object is in an invalid state */
+#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
+#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
+
+#endif /* _UAPI_ASM_POWERPC_EPAPR_HCALLS_H */
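
By way of illustration (a sketch, not part of the header): _EV_HCALL_TOKEN
packs the vendor ID into the upper half-word and the hcall number into the
lower, so the token for EV_BYTE_CHANNEL_SEND under the ePAPR vendor ID is
(1 << 16) | 1 = 0x10001. Compile-time checks:

#include <asm/epapr_hcalls.h>

/* Sketch: confirm the token layout produced by the macros above. */
_Static_assert(EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND) ==
	       ((EV_EPAPR_VENDOR_ID << 16) | EV_BYTE_CHANNEL_SEND),
	       "token = vendor id in bits 16+, hcall number in bits 0-15");
_Static_assert(EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND) == 0x10001,
	       "ePAPR byte-channel send token");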
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
new file mode 100644
index 000000000..4ba87de32
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_ERRNO_H
+#define _ASM_POWERPC_ERRNO_H
+
+#undef EDEADLOCK
+#include <asm-generic/errno.h>
+
+#undef EDEADLOCK
+#define EDEADLOCK 58 /* File locking deadlock error */
+
+#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/uapi/asm/fcntl.h b/arch/powerpc/include/uapi/asm/fcntl.h
new file mode 100644
index 000000000..65ce08322
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/fcntl.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_FCNTL_H
+#define _ASM_FCNTL_H
+
+#define O_DIRECTORY 040000 /* must be a directory */
+#define O_NOFOLLOW 0100000 /* don't follow links */
+#define O_LARGEFILE 0200000
+#define O_DIRECT 0400000 /* direct disk access hint */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _ASM_FCNTL_H */
diff --git a/arch/powerpc/include/uapi/asm/ioctl.h b/arch/powerpc/include/uapi/asm/ioctl.h
new file mode 100644
index 000000000..d623af4b9
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ioctl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_IOCTL_H
+#define _ASM_POWERPC_IOCTL_H
+
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* _ASM_POWERPC_IOCTL_H */
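
Given the overrides above (13 size bits, 3 direction bits, _IOC_WRITE = 4),
the generic <asm-generic/ioctl.h> machinery places the direction at bits 29-31
and the size at bits 16-28, which is why powerpc ioctl numbers differ from most
architectures. A sketch checking the resulting encoding of FIONBIO, i.e.
_IOW('f', 126, int):

#include <asm/ioctl.h>

/* Sketch: with nr at bit 0, type at bit 8, size at bit 16 and dir at
 * bit 29, _IOW('f', 126, int) becomes
 * (4u << 29) | (4 << 16) | (0x66 << 8) | 126 == 0x8004667e. */
_Static_assert(_IOW('f', 126, int) == 0x8004667e,
	       "powerpc FIONBIO encoding");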
diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h
new file mode 100644
index 000000000..2c145da3b
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ioctls.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_IOCTLS_H
+#define _ASM_POWERPC_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TIOCGETP _IOR('t', 8, struct sgttyb)
+#define TIOCSETP _IOW('t', 9, struct sgttyb)
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
+
+#define TIOCSETC _IOW('t', 17, struct tchars)
+#define TIOCGETC _IOR('t', 18, struct tchars)
+#define TCGETS _IOR('t', 19, struct termios)
+#define TCSETS _IOW('t', 20, struct termios)
+#define TCSETSW _IOW('t', 21, struct termios)
+#define TCSETSF _IOW('t', 22, struct termios)
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCGLTC _IOR('t', 116, struct ltchars)
+#define TIOCSLTC _IOW('t', 117, struct ltchars)
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+
+#define TIOCSTI 0x5412
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+# define TIOCPKT_IOCTL 64
+
+
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGRS485 0x542e
+#define TIOCSRS485 0x542f
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
+#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
+#define TIOCVHANGUP 0x5437
+#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
+
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#endif /* _ASM_POWERPC_IOCTLS_H */
diff --git a/arch/powerpc/include/uapi/asm/ipcbuf.h b/arch/powerpc/include/uapi/asm/ipcbuf.h
new file mode 100644
index 000000000..21e1e0ec0
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ipcbuf.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel. Note extra padding because this structure is passed back
+ * and forth between kernel and user space. Pad space is left for:
+ * - 1 32-bit value to fill up for 8-byte alignment
+ * - 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad1;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
new file mode 100644
index 000000000..9f18fa090
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -0,0 +1,736 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __LINUX_KVM_POWERPC_H
+#define __LINUX_KVM_POWERPC_H
+
+#include <linux/types.h>
+
+/* Select powerpc specific features in <linux/kvm.h> */
+#define __KVM_HAVE_SPAPR_TCE
+#define __KVM_HAVE_PPC_SMT
+#define __KVM_HAVE_IRQCHIP
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
+
+/* Not always available, but if it is, this is the correct offset. */
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+};
+
+#define KVM_SREGS_E_IMPL_NONE 0
+#define KVM_SREGS_E_IMPL_FSL 1
+
+#define KVM_SREGS_E_FSL_PIDn (1 << 0) /* PID1/PID2 */
+
+/* flags for kvm_run.flags */
+#define KVM_RUN_PPC_NMI_DISP_MASK (3 << 0)
+#define KVM_RUN_PPC_NMI_DISP_FULLY_RECOV (1 << 0)
+#define KVM_RUN_PPC_NMI_DISP_LIMITED_RECOV (2 << 0)
+#define KVM_RUN_PPC_NMI_DISP_NOT_RECOV (3 << 0)
+
+/*
+ * Feature bits indicate which sections of the sregs struct are valid,
+ * both in KVM_GET_SREGS and KVM_SET_SREGS. On KVM_SET_SREGS, registers
+ * corresponding to unset feature bits will not be modified. This allows
+ * restoring a checkpoint made without that feature, while keeping the
+ * default values of the new registers.
+ *
+ * KVM_SREGS_E_BASE contains:
+ * CSRR0/1 (refers to SRR2/3 on 40x)
+ * ESR
+ * DEAR
+ * MCSR
+ * TSR
+ * TCR
+ * DEC
+ * TB
+ * VRSAVE (USPRG0)
+ */
+#define KVM_SREGS_E_BASE (1 << 0)
+
+/*
+ * KVM_SREGS_E_ARCH206 contains:
+ *
+ * PIR
+ * MCSRR0/1
+ * DECAR
+ * IVPR
+ */
+#define KVM_SREGS_E_ARCH206 (1 << 1)
+
+/*
+ * Contains EPCR, plus the upper half of 64-bit registers
+ * that are 32-bit on 32-bit implementations.
+ */
+#define KVM_SREGS_E_64 (1 << 2)
+
+#define KVM_SREGS_E_SPRG8 (1 << 3)
+#define KVM_SREGS_E_MCIVPR (1 << 4)
+
+/*
+ * IVORs are used -- contains IVOR0-15, plus additional IVORs
+ * in combination with an appropriate feature bit.
+ */
+#define KVM_SREGS_E_IVOR (1 << 5)
+
+/*
+ * Contains MAS0-4, MAS6-7, TLBnCFG, MMUCFG.
+ * Also TLBnPS if MMUCFG[MAVN] = 1.
+ */
+#define KVM_SREGS_E_ARCH206_MMU (1 << 6)
+
+/* DBSR, DBCR, IAC, DAC, DVC */
+#define KVM_SREGS_E_DEBUG (1 << 7)
+
+/* Enhanced debug -- DSRR0/1, SPRG9 */
+#define KVM_SREGS_E_ED (1 << 8)
+
+/* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_SPE (1 << 9)
+
+/*
+ * DEPRECATED! USE ONE_REG FOR THIS ONE!
+ * External Proxy (EXP) -- EPR
+ */
+#define KVM_SREGS_EXP (1 << 10)
+
+/* External PID (E.PD) -- EPSC/EPLC */
+#define KVM_SREGS_E_PD (1 << 11)
+
+/* Processor Control (E.PC) -- IVOR36-37 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_PC (1 << 12)
+
+/* Page table (E.PT) -- EPTCFG */
+#define KVM_SREGS_E_PT (1 << 13)
+
+/* Embedded Performance Monitor (E.PM) -- IVOR35 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_PM (1 << 14)
+
+/*
+ * Special updates:
+ *
+ * Some registers may change even while a vcpu is not running.
+ * To avoid losing these changes, by default these registers are
+ * not updated by KVM_SET_SREGS. To force an update, set the bit
+ * in u.e.update_special corresponding to the register to be updated.
+ *
+ * The update_special field is zero on return from KVM_GET_SREGS.
+ *
+ * When restoring a checkpoint, the caller can set update_special
+ * to 0xffffffff to ensure that everything is restored, even new features
+ * that the caller doesn't know about.
+ */
+#define KVM_SREGS_E_UPDATE_MCSR (1 << 0)
+#define KVM_SREGS_E_UPDATE_TSR (1 << 1)
+#define KVM_SREGS_E_UPDATE_DEC (1 << 2)
+#define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
+
+/*
+ * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
+ * previous KVM_GET_SREGS.
+ *
+ * Unless otherwise indicated, setting any register with KVM_SET_SREGS
+ * directly sets its value. It does not trigger any special semantics such
+ * as write-one-to-clear. Calling KVM_SET_SREGS on an unmodified struct
+ * just received from KVM_GET_SREGS is always a no-op.
+ */
+struct kvm_sregs {
+ __u32 pvr;
+ union {
+ struct {
+ __u64 sdr1;
+ struct {
+ struct {
+ __u64 slbe;
+ __u64 slbv;
+ } slb[64];
+ } ppc64;
+ struct {
+ __u32 sr[16];
+ __u64 ibat[8];
+ __u64 dbat[8];
+ } ppc32;
+ } s;
+ struct {
+ union {
+ struct { /* KVM_SREGS_E_IMPL_FSL */
+ __u32 features; /* KVM_SREGS_E_FSL_ */
+ __u32 svr;
+ __u64 mcar;
+ __u32 hid0;
+
+ /* KVM_SREGS_E_FSL_PIDn */
+ __u32 pid1, pid2;
+ } fsl;
+ __u8 pad[256];
+ } impl;
+
+ __u32 features; /* KVM_SREGS_E_ */
+ __u32 impl_id; /* KVM_SREGS_E_IMPL_ */
+ __u32 update_special; /* KVM_SREGS_E_UPDATE_ */
+ __u32 pir; /* read-only */
+ __u64 sprg8;
+ __u64 sprg9; /* E.ED */
+ __u64 csrr0;
+ __u64 dsrr0; /* E.ED */
+ __u64 mcsrr0;
+ __u32 csrr1;
+ __u32 dsrr1; /* E.ED */
+ __u32 mcsrr1;
+ __u32 esr;
+ __u64 dear;
+ __u64 ivpr;
+ __u64 mcivpr;
+ __u64 mcsr; /* KVM_SREGS_E_UPDATE_MCSR */
+
+ __u32 tsr; /* KVM_SREGS_E_UPDATE_TSR */
+ __u32 tcr;
+ __u32 decar;
+ __u32 dec; /* KVM_SREGS_E_UPDATE_DEC */
+
+ /*
+ * Userspace can read TB directly, but the
+ * value reported here is consistent with "dec".
+ *
+ * Read-only.
+ */
+ __u64 tb;
+
+ __u32 dbsr; /* KVM_SREGS_E_UPDATE_DBSR */
+ __u32 dbcr[3];
+ /*
+ * iac/dac registers are 64bit wide, while this API
+ * interface provides only lower 32 bits on 64 bit
+ * processors. ONE_REG interface is added for 64bit
+ * iac/dac registers.
+ */
+ __u32 iac[4];
+ __u32 dac[2];
+ __u32 dvc[2];
+ __u8 num_iac; /* read-only */
+ __u8 num_dac; /* read-only */
+ __u8 num_dvc; /* read-only */
+ __u8 pad;
+
+ __u32 epr; /* EXP */
+ __u32 vrsave; /* a.k.a. USPRG0 */
+ __u32 epcr; /* KVM_SREGS_E_64 */
+
+ __u32 mas0;
+ __u32 mas1;
+ __u64 mas2;
+ __u64 mas7_3;
+ __u32 mas4;
+ __u32 mas6;
+
+ __u32 ivor_low[16]; /* IVOR0-15 */
+ __u32 ivor_high[18]; /* IVOR32+, plus room to expand */
+
+ __u32 mmucfg; /* read-only */
+ __u32 eptcfg; /* E.PT, read-only */
+ __u32 tlbcfg[4];/* read-only */
+ __u32 tlbps[4]; /* read-only */
+
+ __u32 eplc, epsc; /* E.PD */
+ } e;
+ __u8 pad[1020];
+ } u;
+};
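
Tying this back to the "Special updates" comment earlier: when restoring a
checkpoint, userspace can set every update bit so that even MCSR, TSR, DEC and
DBSR are written. A hedged sketch (the vcpu fd plumbing is assumed, not shown):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: force-restore all "special" BookE registers from a saved
 * kvm_sregs, per the update_special documentation above. */
static int restore_booke_sregs(int vcpu_fd, struct kvm_sregs *saved)
{
	saved->u.e.update_special = 0xffffffff;	/* restore everything */
	return ioctl(vcpu_fd, KVM_SET_SREGS, saved);
}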
+
+struct kvm_fpu {
+ __u64 fpr[32];
+};
+
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE 0x0
+#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
+struct kvm_debug_exit_arch {
+ __u64 address;
+ /*
+ * exiting to userspace because of h/w breakpoint, watchpoint
+ * (read, write or both) and software breakpoint.
+ */
+ __u32 status;
+ __u32 reserved;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+ struct {
+ /* H/W breakpoint/watchpoint address */
+ __u64 addr;
+ /*
+ * Type denotes h/w breakpoint, read watchpoint, write
+ * watchpoint or watchpoint (both read and write).
+ */
+ __u32 type;
+ __u32 reserved;
+ } bp[16];
+};
+
+/* Debug related defines */
+/*
+ * kvm_guest_debug->control is a 32-bit field. The lower 16 bits are generic
+ * and the upper 16 bits are architecture specific. The architecture-specific
+ * bits define whether the ioctl sets a hardware or a software breakpoint.
+ */
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+#define KVM_GUESTDBG_USE_HW_BP 0x00020000
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#define KVM_INTERRUPT_SET -1U
+#define KVM_INTERRUPT_UNSET -2U
+#define KVM_INTERRUPT_SET_LEVEL -3U
+
+#define KVM_CPU_440 1
+#define KVM_CPU_E500V2 2
+#define KVM_CPU_3S_32 3
+#define KVM_CPU_3S_64 4
+#define KVM_CPU_E500MC 5
+
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+ __u64 liobn;
+ __u32 window_size;
+};
+
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+ __u64 liobn;
+ __u32 page_shift;
+ __u32 flags;
+ __u64 offset; /* in pages */
+ __u64 size; /* in pages */
+};
+
+/* for KVM_ALLOCATE_RMA */
+struct kvm_allocate_rma {
+ __u64 rma_size;
+};
+
+/* for KVM_CAP_PPC_RTAS */
+struct kvm_rtas_token_args {
+ char name[120];
+ __u64 token; /* Use a token of 0 to undefine a mapping */
+};
+
+struct kvm_book3e_206_tlb_entry {
+ __u32 mas8;
+ __u32 mas1;
+ __u64 mas2;
+ __u64 mas7_3;
+};
+
+struct kvm_book3e_206_tlb_params {
+ /*
+ * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+ *
+ * - The number of ways of TLB0 must be a power of two between 2 and
+ * 16.
+ * - TLB1 must be fully associative.
+ * - The size of TLB0 must be a multiple of the number of ways, and
+ * the number of sets must be a power of two.
+ * - The size of TLB1 may not exceed 64 entries.
+ * - TLB0 supports 4 KiB pages.
+ * - The page sizes supported by TLB1 are as indicated by
+ * TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
+ * as returned by KVM_GET_SREGS.
+ * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
+ * and tlb_ways[] must be zero.
+ *
+ * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
+ *
+ * KVM will adjust TLBnCFG based on the sizes configured here,
+ * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
+ * set to zero.
+ */
+ __u32 tlb_sizes[4];
+ __u32 tlb_ways[4];
+ __u32 reserved[8];
+};
+
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+ __u64 flags;
+ __u64 start_index;
+ __u64 reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1)
+#define KVM_GET_HTAB_WRITE ((__u64)0x2)
+
+/*
+ * Data read on the file descriptor is formatted as a series of
+ * records, each consisting of a header followed by a series of
+ * `n_valid' HPTEs (16 bytes each), which are all valid. Following
+ * those valid HPTEs there are `n_invalid' invalid HPTEs, which
+ * are not represented explicitly in the stream. The same format
+ * is used for writing.
+ */
+struct kvm_get_htab_header {
+ __u32 index;
+ __u16 n_valid;
+ __u16 n_invalid;
+};
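
A sketch (not kernel code) of consuming the stream format described above: each
record is a kvm_get_htab_header followed by n_valid 16-byte HPTEs, with the
n_invalid empty slots implied rather than present in the data.

#include <stddef.h>
#include <linux/kvm.h>

/* Sketch: walk the records in one read() from a KVM_PPC_GET_HTAB_FD
 * descriptor. Assumes 'buf'/'len' contain whole records. */
static void walk_htab_stream(const unsigned char *buf, size_t len)
{
	const unsigned char *p = buf;

	while ((size_t)(p - buf) + sizeof(struct kvm_get_htab_header) <= len) {
		const struct kvm_get_htab_header *h =
			(const struct kvm_get_htab_header *)p;

		/* h->n_valid HPTEs (16 bytes each) follow the header;
		 * the next h->n_invalid HPT slots are implicitly empty. */
		p += sizeof(*h) + (size_t)h->n_valid * 16;
	}
}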
+
+/* For KVM_PPC_CONFIGURE_V3_MMU */
+struct kvm_ppc_mmuv3_cfg {
+ __u64 flags;
+ __u64 process_table; /* second doubleword of partition table entry */
+};
+
+/* Flag values for KVM_PPC_CONFIGURE_V3_MMU */
+#define KVM_PPC_MMUV3_RADIX 1 /* 1 = radix mode, 0 = HPT */
+#define KVM_PPC_MMUV3_GTSE 2 /* global translation shootdown enb. */
+
+/* For KVM_PPC_GET_RMMU_INFO */
+struct kvm_ppc_rmmu_info {
+ struct kvm_ppc_radix_geom {
+ __u8 page_shift;
+ __u8 level_bits[4];
+ __u8 pad[3];
+ } geometries[8];
+ __u32 ap_encodings[8];
+};
+
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+ __u64 character; /* characteristics of the CPU */
+ __u64 behaviour; /* recommended software behaviour */
+ __u64 character_mask; /* valid bits in character */
+ __u64 behaviour_mask; /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
+
+/* Per-vcpu XICS interrupt controller state */
+#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
+
+#define KVM_REG_PPC_ICP_CPPR_SHIFT 56 /* current proc priority */
+#define KVM_REG_PPC_ICP_CPPR_MASK 0xff
+#define KVM_REG_PPC_ICP_XISR_SHIFT 32 /* interrupt status field */
+#define KVM_REG_PPC_ICP_XISR_MASK 0xffffff
+#define KVM_REG_PPC_ICP_MFRR_SHIFT 24 /* pending IPI priority */
+#define KVM_REG_PPC_ICP_MFRR_MASK 0xff
+#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
+#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
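
Unpacking the 64-bit ICP state word by the shifts and masks just defined looks
like this sketch (the struct and helper names are illustrative only, and assume
the definitions above are in scope):

/* Sketch: decompose a KVM_REG_PPC_ICP_STATE value using the layout above. */
struct icp_state {
	unsigned char cppr;		/* current processor priority */
	unsigned int  xisr;		/* interrupt source being presented */
	unsigned char mfrr;		/* pending IPI priority */
	unsigned char ppri;		/* pending interrupt priority */
};

static inline struct icp_state icp_unpack(unsigned long long v)
{
	struct icp_state s = {
		.cppr = (v >> KVM_REG_PPC_ICP_CPPR_SHIFT) & KVM_REG_PPC_ICP_CPPR_MASK,
		.xisr = (v >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK,
		.mfrr = (v >> KVM_REG_PPC_ICP_MFRR_SHIFT) & KVM_REG_PPC_ICP_MFRR_MASK,
		.ppri = (v >> KVM_REG_PPC_ICP_PPRI_SHIFT) & KVM_REG_PPC_ICP_PPRI_MASK,
	};
	return s;
}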
+
+#define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)
+
+/* Device control API: PPC-specific devices */
+#define KVM_DEV_MPIC_GRP_MISC 1
+#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
+
+#define KVM_DEV_MPIC_GRP_REGISTER 2 /* 32-bit */
+#define KVM_DEV_MPIC_GRP_IRQ_ACTIVE 3 /* 32-bit */
+
+/* One-Reg API: PPC-specific registers */
+#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
+#define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_PPC_IAC3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
+#define KVM_REG_PPC_IAC4 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
+#define KVM_REG_PPC_DAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
+#define KVM_REG_PPC_DAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_PPC_DABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_PPC_DSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
+#define KVM_REG_PPC_PURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
+#define KVM_REG_PPC_SPURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
+#define KVM_REG_PPC_DAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
+#define KVM_REG_PPC_DSISR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
+#define KVM_REG_PPC_AMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
+#define KVM_REG_PPC_UAMOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
+
+#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
+#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
+#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
+
+#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
+#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
+#define KVM_REG_PPC_PMC3 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
+#define KVM_REG_PPC_PMC4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
+#define KVM_REG_PPC_PMC5 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
+#define KVM_REG_PPC_PMC6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
+#define KVM_REG_PPC_PMC7 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
+#define KVM_REG_PPC_PMC8 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
+
+/* 32 floating-point registers */
+#define KVM_REG_PPC_FPR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
+#define KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n))
+#define KVM_REG_PPC_FPR31 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
+
+/* 32 VMX/Altivec vector registers */
+#define KVM_REG_PPC_VR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
+#define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n))
+#define KVM_REG_PPC_VR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
+
+/* 32 double-width FP registers for VSX */
+/* High-order halves overlap with FP regs */
+#define KVM_REG_PPC_VSR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
+#define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n))
+#define KVM_REG_PPC_VSR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
+
+/* FP and vector status/control registers */
+#define KVM_REG_PPC_FPSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accessed via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
+#define KVM_REG_PPC_VSCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
+
+/* Virtual processor areas */
+/* For SLB & DTL, address in high (first) half, length in low half */
+#define KVM_REG_PPC_VPA_ADDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
+#define KVM_REG_PPC_VPA_SLB (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
+#define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
+
+#define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
+#define KVM_REG_PPC_EPR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
+
+/* Timer Status Register OR/CLEAR interface */
+#define KVM_REG_PPC_OR_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
+#define KVM_REG_PPC_CLEAR_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
+#define KVM_REG_PPC_TCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
+#define KVM_REG_PPC_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)
+
+/* Debugging: Special instruction for software breakpoint */
+#define KVM_REG_PPC_DEBUG_INST (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8b)
+
+/* MMU registers */
+#define KVM_REG_PPC_MAS0 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8c)
+#define KVM_REG_PPC_MAS1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8d)
+#define KVM_REG_PPC_MAS2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8e)
+#define KVM_REG_PPC_MAS7_3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8f)
+#define KVM_REG_PPC_MAS4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x90)
+#define KVM_REG_PPC_MAS6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x91)
+#define KVM_REG_PPC_MMUCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x92)
+/*
+ * TLBnCFG fields TLBnCFG_N_ENTRY and TLBnCFG_ASSOC can be changed only using
+ * KVM_CAP_SW_TLB ioctl
+ */
+#define KVM_REG_PPC_TLB0CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x93)
+#define KVM_REG_PPC_TLB1CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x94)
+#define KVM_REG_PPC_TLB2CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x95)
+#define KVM_REG_PPC_TLB3CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x96)
+#define KVM_REG_PPC_TLB0PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x97)
+#define KVM_REG_PPC_TLB1PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x98)
+#define KVM_REG_PPC_TLB2PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x99)
+#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
+#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
+
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
+#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
+#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
+#define KVM_REG_PPC_DBSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
+
+/* POWER9 registers */
+#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
+#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
+
+/* POWER10 registers */
+#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1)
+#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2)
+#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
+#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
+#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
+
+/* PPC64 eXternal Interrupt Controller Specification */
+#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+#define KVM_DEV_XICS_GRP_CTRL 2
+#define KVM_DEV_XICS_NR_SERVERS 1
+
+/* Layout of 64-bit source attribute values */
+#define KVM_XICS_DESTINATION_SHIFT 0
+#define KVM_XICS_DESTINATION_MASK 0xffffffffULL
+#define KVM_XICS_PRIORITY_SHIFT 32
+#define KVM_XICS_PRIORITY_MASK 0xff
+#define KVM_XICS_LEVEL_SENSITIVE (1ULL << 40)
+#define KVM_XICS_MASKED (1ULL << 41)
+#define KVM_XICS_PENDING (1ULL << 42)
+#define KVM_XICS_PRESENTED (1ULL << 43)
+#define KVM_XICS_QUEUED (1ULL << 44)
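
A sketch of composing one 64-bit source attribute per the layout above
(destination in the low word, priority at bit 32, flag bits above that); the
helper name is hypothetical:

/* Sketch: build a 64-bit XICS source attribute value from its fields,
 * using the shift/mask/flag definitions above. */
static inline unsigned long long xics_source_attr(unsigned int destination,
						  unsigned char priority,
						  int level_sensitive,
						  int masked)
{
	unsigned long long v = 0;

	v |= (destination & KVM_XICS_DESTINATION_MASK)
		<< KVM_XICS_DESTINATION_SHIFT;
	v |= (unsigned long long)(priority & KVM_XICS_PRIORITY_MASK)
		<< KVM_XICS_PRIORITY_SHIFT;
	if (level_sensitive)
		v |= KVM_XICS_LEVEL_SENSITIVE;
	if (masked)
		v |= KVM_XICS_MASKED;
	return v;
}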
+
+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL 1
+#define KVM_DEV_XIVE_RESET 1
+#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_NR_SERVERS 3
+#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT 3
+#define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT 32
+#define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT 33
+#define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT 0
+#define KVM_XIVE_EQ_PRIORITY_MASK 0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT 3
+#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET 0
+#define KVM_XIVE_ESB_PAGE_OFFSET 4
+
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000..a809b1b44
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _UAPI__POWERPC_KVM_PARA_H__
+#define _UAPI__POWERPC_KVM_PARA_H__
+
+#include <linux/types.h>
+
+/*
+ * Additions to this struct must only occur at the end, and should be
+ * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
+ * (albeit not necessarily relevant to the current target hardware platform).
+ *
+ * Struct fields are always 32 or 64 bit aligned, depending on them being 32
+ * or 64 bit wide respectively.
+ *
+ * See Documentation/virt/kvm/ppc-pv.rst
+ */
+struct kvm_vcpu_arch_shared {
+ __u64 scratch1;
+ __u64 scratch2;
+ __u64 scratch3;
+ __u64 critical; /* Guest may not get interrupts if == r1 */
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 dar; /* dear on BookE */
+ __u64 msr;
+ __u32 dsisr;
+ __u32 int_pending; /* Tells the guest if we have an interrupt */
+ __u32 sr[16];
+ __u32 mas0;
+ __u32 mas1;
+ __u64 mas7_3;
+ __u64 mas2;
+ __u32 mas4;
+ __u32 mas6;
+ __u32 esr;
+ __u32 pir;
+
+ /*
+ * SPRG4-7 are user-readable, so we can only keep these consistent
+ * between the shared area and the real registers when there's an
+ * intervening exit to KVM. This also applies to SPRG3 on some
+ * chips.
+ *
+ * This suffices for access by guest userspace, since in PR-mode
+ * KVM, an exit must occur when changing the guest's MSR[PR].
+ * If the guest kernel writes to SPRG3-7 via the shared area, it
+ * must also use the shared area for reading while in kernel space.
+ */
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+};
+
+#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
+
+#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
+
+#include <asm/epapr_hcalls.h>
+
+#define KVM_FEATURE_MAGIC_PAGE 1
+
+/* Magic page flags from host to guest */
+
+#define KVM_MAGIC_FEAT_SR (1 << 0)
+
+/* MASn, ESR, PIR, and high SPRGs */
+#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
+
+/* Magic page flags from guest to host */
+
+#define MAGIC_PAGE_FLAG_NOT_MAPPED_NX (1 << 0)
+
+
+#endif /* _UAPI__POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
new file mode 100644
index 000000000..c0c737215
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_MMAN_H
+#define _UAPI_ASM_POWERPC_MMAN_H
+
+#include <asm-generic/mman-common.h>
+
+
+#define PROT_SAO 0x10 /* Strong Access Ordering */
+
+#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
+#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+#define MAP_LOCKED 0x80
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
+
+#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
+#define MCL_FUTURE 0x4000 /* lock all additions to address space */
+#define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */
+
+/* Override any generic PKEY permission defines */
+#define PKEY_DISABLE_EXECUTE 0x4
+#undef PKEY_ACCESS_MASK
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE |\
+ PKEY_DISABLE_EXECUTE)
+#endif /* _UAPI_ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/include/uapi/asm/msgbuf.h b/arch/powerpc/include/uapi/asm/msgbuf.h
new file mode 100644
index 000000000..7919b2ba4
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/msgbuf.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_MSGBUF_H
+#define _ASM_POWERPC_MSGBUF_H
+
+#include <asm/ipcbuf.h>
+
+/*
+ * The msqid64_ds structure for the PowerPC architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifdef __powerpc64__
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
+#else
+ unsigned long msg_stime_high;
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_ctime_high;
+ unsigned long msg_ctime; /* last change time */
+#endif
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _ASM_POWERPC_MSGBUF_H */
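
One practical note on the 32-bit layout above: the *_high fields carry bits
32-63 of the 64-bit timestamps (the y2038-safe split), so reassembly is a shift
and OR. A sketch, with a hypothetical helper name:

#include <asm/msgbuf.h>

#ifndef __powerpc64__
/* Sketch: rebuild the 64-bit last-msgsnd time from the split fields
 * used on 32-bit powerpc. */
static inline long long msq_stime64(const struct msqid64_ds *ds)
{
	return ((long long)ds->msg_stime_high << 32) | ds->msg_stime;
}
#endif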
diff --git a/arch/powerpc/include/uapi/asm/nvram.h b/arch/powerpc/include/uapi/asm/nvram.h
new file mode 100644
index 000000000..c92c7f056
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/nvram.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * NVRAM definitions and access functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_NVRAM_H
+#define _UAPI_ASM_POWERPC_NVRAM_H
+
+/* Signatures for nvram partitions */
+#define NVRAM_SIG_SP 0x02 /* support processor */
+#define NVRAM_SIG_OF 0x50 /* open firmware config */
+#define NVRAM_SIG_FW 0x51 /* general firmware */
+#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */
+#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */
+#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) */
+#define NVRAM_SIG_SYS 0x70 /* system env vars */
+#define NVRAM_SIG_CFG 0x71 /* config data */
+#define NVRAM_SIG_ELOG 0x72 /* error log */
+#define NVRAM_SIG_VEND 0x7e /* vendor defined */
+#define NVRAM_SIG_FREE 0x7f /* Free space */
+#define NVRAM_SIG_OS 0xa0 /* OS defined */
+#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
+
+
+/* PowerMac specific nvram stuffs */
+
+enum {
+ pmac_nvram_OF, /* Open Firmware partition */
+ pmac_nvram_XPRAM, /* MacOS XPRAM partition */
+ pmac_nvram_NR /* MacOS Name Registry partition */
+};
+
+
+/* Some offsets in XPRAM */
+#define PMAC_XPRAM_MACHINE_LOC 0xe4
+#define PMAC_XPRAM_SOUND_VOLUME 0x08
+
+/* Machine location structure in PowerMac XPRAM */
+struct pmac_machine_location {
+ unsigned int latitude; /* 2+30 bit Fractional number */
+ unsigned int longitude; /* 2+30 bit Fractional number */
+ unsigned int delta; /* mix of GMT delta and DLS */
+};
+
+/*
+ * /dev/nvram ioctls
+ *
+ * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
+ * definitely obsolete. Do not use it if you can avoid it
+ */
+
+#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
+ _IOWR('p', 0x40, int)
+
+#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
+#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
+
+#endif /* _UAPI_ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/include/uapi/asm/opal-prd.h b/arch/powerpc/include/uapi/asm/opal-prd.h
new file mode 100644
index 000000000..1869cf83a
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/opal-prd.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * OPAL Runtime Diagnostics interface driver
+ * Supported on POWERNV platform
+ *
+ * (C) Copyright IBM 2015
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ * Author: Jeremy Kerr <jk@ozlabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_OPAL_PRD_H_
+#define _UAPI_ASM_POWERPC_OPAL_PRD_H_
+
+#include <linux/types.h>
+
+/**
+ * The version of the kernel interface of the PRD system. This describes the
+ * interface available for the /dev/opal-prd device. The actual PRD message
+ * layout and content is private to the firmware <--> userspace interface, so
+ * is not covered by this versioning.
+ *
+ * Future interface versions are backwards-compatible; if a later kernel
+ * version is encountered, functionality provided in earlier versions
+ * will work.
+ */
+#define OPAL_PRD_KERNEL_VERSION 1
+
+#define OPAL_PRD_GET_INFO _IOR('o', 0x01, struct opal_prd_info)
+#define OPAL_PRD_SCOM_READ _IOR('o', 0x02, struct opal_prd_scom)
+#define OPAL_PRD_SCOM_WRITE _IOW('o', 0x03, struct opal_prd_scom)
+
+#ifndef __ASSEMBLY__
+
+struct opal_prd_info {
+ __u64 version;
+ __u64 reserved[3];
+};
+
+struct opal_prd_scom {
+ __u64 chip;
+ __u64 addr;
+ __u64 data;
+ __s64 rc;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_POWERPC_OPAL_PRD_H */
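
A hedged usage sketch (not from the kernel tree): open the /dev/opal-prd device
mentioned in the comment above and query the interface version with
OPAL_PRD_GET_INFO; error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/opal-prd.h>

/* Sketch: query the opal-prd kernel interface version. */
int main(void)
{
	struct opal_prd_info info = { 0 };
	int fd = open("/dev/opal-prd", O_RDWR);

	if (fd < 0 || ioctl(fd, OPAL_PRD_GET_INFO, &info) < 0)
		return 1;
	printf("opal-prd interface version %llu\n",
	       (unsigned long long)info.version);
	close(fd);
	return 0;
}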
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
new file mode 100644
index 000000000..174399250
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl
+ *
+ * (C) Copyright IBM 2020
+ *
+ * Author: Vaibhav Jain <vaibhav at linux.ibm.com>
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+
+#include <linux/types.h>
+#include <linux/ndctl.h>
+
+/*
+ * PDSM Envelope:
+ *
+ * The ND_CMD_CALL ioctl exchanges data between user space and the kernel via
+ * an envelope which consists of two header sections and a payload section, as
+ * illustrated below:
+ * +-----------------+---------------+---------------------------+
+ * | 64-Bytes | 8-Bytes | Max 184-Bytes |
+ * +-----------------+---------------+---------------------------+
+ * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD |
+ * +-----------------+---------------+---------------------------+
+ * | nd_family | | |
+ * | nd_size_out | cmd_status | |
+ * | nd_size_in | reserved | nd_pdsm_payload |
+ * | nd_command | payload --> | |
+ * | nd_fw_size | | |
+ * | nd_payload ---> | | |
+ * +---------------+-----------------+---------------------------+
+ *
+ * ND Header:
+ * This is the generic libnvdimm header described as 'struct nd_cmd_pkg'
+ * which is interpreted by libnvdimm before being passed on to papr_scm. Important
+ * member fields used are:
+ * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM
+ * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0)
+ * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD
+ * 'nd_command' : (In) One of PAPR_PDSM_XXX
+ * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned
+ *
+ * PDSM Header:
+ * This is a papr-scm specific header that precedes the payload. It is defined
+ * as 'struct nd_pkg_pdsm'. The following fields are available in this header:
+ *
+ * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM.
+ * 'reserved' : Not used, reserved for future and should be set to 0.
+ * 'payload' : A union of all the possible payload structs
+ *
+ * PDSM Payload:
+ *
+ * The layout of the PDSM Payload is defined by various structs shared between
+ * papr_scm and libndctl so that the contents of the payload can be
+ * interpreted. As such it is defined as a union of all possible payload
+ * structs, 'union nd_pdsm_payload'. Based on the value of
+ * 'nd_cmd_pkg.nd_command' the appropriate member of the union is accessed.
+ */
+
+/* Max payload size that we can handle */
+#define ND_PDSM_PAYLOAD_MAX_SIZE 184
+
+/* Size of the PDSM header that precedes the payload in the envelope */
+#define ND_PDSM_HDR_SIZE \
+ (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE)
+
+/* Various nvdimm health indicators */
+#define PAPR_PDSM_DIMM_HEALTHY 0
+#define PAPR_PDSM_DIMM_UNHEALTHY 1
+#define PAPR_PDSM_DIMM_CRITICAL 2
+#define PAPR_PDSM_DIMM_FATAL 3
+
+/* struct nd_papr_pdsm_health.extension_flags field flags */
+
+/* Indicate that the 'dimm_fuel_gauge' field is valid */
+#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
+/*
+ * Struct exchanged between kernel & ndctl for PAPR_PDSM_HEALTH.
+ * Various flags indicate the health status of the dimm.
+ *
+ * extension_flags : Any extension fields present in the struct.
+ * dimm_unarmed : Dimm not armed, so contents won't persist.
+ * dimm_bad_shutdown : Previous shutdown did not persist contents.
+ * dimm_bad_restore : Contents from previous shutdown weren't restored.
+ * dimm_scrubbed : Contents of the dimm have been scrubbed.
+ * dimm_locked : Contents of the dimm can't be modified until CEC reboot.
+ * dimm_encrypted : Contents of dimm are encrypted.
+ * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX
+ * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100
+ */
+struct nd_papr_pdsm_health {
+ union {
+ struct {
+ __u32 extension_flags;
+ __u8 dimm_unarmed;
+ __u8 dimm_bad_shutdown;
+ __u8 dimm_bad_restore;
+ __u8 dimm_scrubbed;
+ __u8 dimm_locked;
+ __u8 dimm_encrypted;
+ __u16 dimm_health;
+
+ /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
+ __u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
+ };
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+ };
+};
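+
+/*
+ * Illustrative check, not part of this header: 'dimm_fuel_gauge' and
+ * 'dimm_dsc' are only meaningful when the matching bit is set in
+ * 'extension_flags', so consumers are expected to test it first
+ * ('report_life_remaining' is a hypothetical caller-side helper):
+ *
+ *    if (health.extension_flags & PDSM_DIMM_HEALTH_RUN_GAUGE_VALID)
+ *        report_life_remaining(health.dimm_fuel_gauge);
+ */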
+
+/* Flags for injecting specific smart errors */
+#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0)
+#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1)
+
+struct nd_papr_pdsm_smart_inject {
+ union {
+ struct {
+ /* One or more of PDSM_SMART_INJECT_ */
+ __u32 flags;
+ __u8 fatal_enable;
+ __u8 unsafe_shutdown_enable;
+ };
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+ };
+};
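+
+/*
+ * Illustrative payload setup, not part of this header: requesting a
+ * fatal-health injection sets both the selection flag and its enable
+ * field:
+ *
+ *    struct nd_papr_pdsm_smart_inject inj = { 0 };
+ *
+ *    inj.flags = PDSM_SMART_INJECT_HEALTH_FATAL;
+ *    inj.fatal_enable = 1;
+ */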
+
+/*
+ * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel
+ * via 'nd_cmd_pkg.nd_command' member of the ioctl struct
+ */
+enum papr_pdsm {
+ PAPR_PDSM_MIN = 0x0,
+ PAPR_PDSM_HEALTH,
+ PAPR_PDSM_SMART_INJECT,
+ PAPR_PDSM_MAX,
+};
+
+/* Maximal union that can hold all possible payload types */
+union nd_pdsm_payload {
+ struct nd_papr_pdsm_health health;
+ struct nd_papr_pdsm_smart_inject smart_inject;
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+} __packed;
+
+/*
+ * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm
+ * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command'
+ * that should always precede this struct when sent to papr_scm via CMD_CALL
+ * interface.
+ */
+struct nd_pkg_pdsm {
+ __s32 cmd_status; /* Out: Sub-cmd status returned */
+ __u16 reserved[2]; /* Ignored; must be set to 0 */
+ union nd_pdsm_payload payload;
+} __packed;
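+
+/*
+ * Illustrative sketch of the envelope described at the top of this
+ * header, not part of the ABI: a libndctl-style caller wraps the PDSM
+ * header and payload in a 'struct nd_cmd_pkg' and issues it with the
+ * ND_IOCTL_CALL ioctl from <linux/ndctl.h>. 'dimm_fd' is assumed to be
+ * an open nvdimm device node; error handling is elided.
+ *
+ *    unsigned char buf[sizeof(struct nd_cmd_pkg) + sizeof(struct nd_pkg_pdsm)] = { 0 };
+ *    struct nd_cmd_pkg *hdr = (struct nd_cmd_pkg *)buf;
+ *    struct nd_pkg_pdsm *pdsm = (struct nd_pkg_pdsm *)hdr->nd_payload;
+ *
+ *    hdr->nd_family   = NVDIMM_FAMILY_PAPR_SCM;
+ *    hdr->nd_command  = PAPR_PDSM_HEALTH;
+ *    hdr->nd_size_out = ND_PDSM_HDR_SIZE + sizeof(struct nd_papr_pdsm_health);
+ *
+ *    ioctl(dimm_fd, ND_IOCTL_CALL, hdr);
+ *    // per-PDSM success/failure is then read from pdsm->cmd_status
+ */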
+
+#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000..ce488e48d
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT 63
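+
+/*
+ * Illustrative use, not part of this header: userspace requests EBB by
+ * setting this bit in the event's config, e.g.
+ *
+ *    attr.config |= 1ULL << PERF_EVENT_CONFIG_EBB_SHIFT;
+ */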
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000..749a2e3af
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_POWERPC_PERF_REGS_H
+#define _UAPI_ASM_POWERPC_PERF_REGS_H
+
+enum perf_event_powerpc_regs {
+ PERF_REG_POWERPC_R0,
+ PERF_REG_POWERPC_R1,
+ PERF_REG_POWERPC_R2,
+ PERF_REG_POWERPC_R3,
+ PERF_REG_POWERPC_R4,
+ PERF_REG_POWERPC_R5,
+ PERF_REG_POWERPC_R6,
+ PERF_REG_POWERPC_R7,
+ PERF_REG_POWERPC_R8,
+ PERF_REG_POWERPC_R9,
+ PERF_REG_POWERPC_R10,
+ PERF_REG_POWERPC_R11,
+ PERF_REG_POWERPC_R12,
+ PERF_REG_POWERPC_R13,
+ PERF_REG_POWERPC_R14,
+ PERF_REG_POWERPC_R15,
+ PERF_REG_POWERPC_R16,
+ PERF_REG_POWERPC_R17,
+ PERF_REG_POWERPC_R18,
+ PERF_REG_POWERPC_R19,
+ PERF_REG_POWERPC_R20,
+ PERF_REG_POWERPC_R21,
+ PERF_REG_POWERPC_R22,
+ PERF_REG_POWERPC_R23,
+ PERF_REG_POWERPC_R24,
+ PERF_REG_POWERPC_R25,
+ PERF_REG_POWERPC_R26,
+ PERF_REG_POWERPC_R27,
+ PERF_REG_POWERPC_R28,
+ PERF_REG_POWERPC_R29,
+ PERF_REG_POWERPC_R30,
+ PERF_REG_POWERPC_R31,
+ PERF_REG_POWERPC_NIP,
+ PERF_REG_POWERPC_MSR,
+ PERF_REG_POWERPC_ORIG_R3,
+ PERF_REG_POWERPC_CTR,
+ PERF_REG_POWERPC_LINK,
+ PERF_REG_POWERPC_XER,
+ PERF_REG_POWERPC_CCR,
+ PERF_REG_POWERPC_SOFTE,
+ PERF_REG_POWERPC_TRAP,
+ PERF_REG_POWERPC_DAR,
+ PERF_REG_POWERPC_DSISR,
+ PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ PERF_REG_POWERPC_PMC1,
+ PERF_REG_POWERPC_PMC2,
+ PERF_REG_POWERPC_PMC3,
+ PERF_REG_POWERPC_PMC4,
+ PERF_REG_POWERPC_PMC5,
+ PERF_REG_POWERPC_PMC6,
+ PERF_REG_POWERPC_SDAR,
+ PERF_REG_POWERPC_SIAR,
+ /* Max mask value for interrupt regs w/o extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
+ /* Max mask value for interrupt regs including extended regs */
+ PERF_REG_EXTENDED_MAX = PERF_REG_POWERPC_SIAR + 1,
+};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
+ * includes 11 SPRs from MMCR0 to SIAR, excluding the
+ * unsupported SPRs MMCR3, SIER2 and SIER3.
+ */
+#define PERF_REG_PMU_MASK_300 \
+ ((1ULL << PERF_REG_POWERPC_MMCR0) | (1ULL << PERF_REG_POWERPC_MMCR1) | \
+ (1ULL << PERF_REG_POWERPC_MMCR2) | (1ULL << PERF_REG_POWERPC_PMC1) | \
+ (1ULL << PERF_REG_POWERPC_PMC2) | (1ULL << PERF_REG_POWERPC_PMC3) | \
+ (1ULL << PERF_REG_POWERPC_PMC4) | (1ULL << PERF_REG_POWERPC_PMC5) | \
+ (1ULL << PERF_REG_POWERPC_PMC6) | (1ULL << PERF_REG_POWERPC_SDAR) | \
+ (1ULL << PERF_REG_POWERPC_SIAR))
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
+ * includes 14 SPRs from MMCR0 to SIAR.
+ */
+#define PERF_REG_PMU_MASK_31 \
+ (PERF_REG_PMU_MASK_300 | (1ULL << PERF_REG_POWERPC_MMCR3) | \
+ (1ULL << PERF_REG_POWERPC_SIER2) | (1ULL << PERF_REG_POWERPC_SIER3))
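+
+/*
+ * Illustrative use, not part of this header: a sampling tool selects
+ * registers to capture by building a bitmask of the enum values above,
+ * e.g. to sample the NIP and MSR on each sampling interrupt:
+ *
+ *    attr.sample_type     |= PERF_SAMPLE_REGS_INTR;
+ *    attr.sample_regs_intr = (1ULL << PERF_REG_POWERPC_NIP) |
+ *                            (1ULL << PERF_REG_POWERPC_MSR);
+ */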
+
+#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/include/uapi/asm/posix_types.h b/arch/powerpc/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000..9c0342312
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/posix_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_POSIX_TYPES_H
+#define _ASM_POWERPC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+#ifdef __powerpc64__
+typedef unsigned long __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
+#else
+typedef short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+#endif
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/powerpc/include/uapi/asm/ps3fb.h b/arch/powerpc/include/uapi/asm/ps3fb.h
new file mode 100644
index 000000000..fd7e3a0d3
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ps3fb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006, 2007 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _ASM_POWERPC_PS3FB_H_
+#define _ASM_POWERPC_PS3FB_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* ioctl */
+#define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */
+#define PS3FB_IOCTL_GETMODE _IOR('r', 2, int) /* get video mode */
+#define PS3FB_IOCTL_SCREENINFO _IOR('r', 3, int) /* get screen info */
+#define PS3FB_IOCTL_ON _IO('r', 4) /* use IOCTL_FSEL */
+#define PS3FB_IOCTL_OFF _IO('r', 5) /* return to normal-flip */
+#define PS3FB_IOCTL_FSEL _IOW('r', 6, int) /* blit and flip request */
+
+#ifndef FBIO_WAITFORVSYNC
+#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) /* wait for vsync */
+#endif
+
+struct ps3fb_ioctl_res {
+ __u32 xres; /* frame buffer x_size */
+ __u32 yres; /* frame buffer y_size */
+ __u32 xoff; /* margin x */
+ __u32 yoff; /* margin y */
+ __u32 num_frames; /* num of frame buffers */
+};
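+
+/*
+ * Illustrative use, not part of this header: querying the screen info
+ * through an open ps3fb device might look like:
+ *
+ *    struct ps3fb_ioctl_res res;
+ *
+ *    if (ioctl(fd, PS3FB_IOCTL_SCREENINFO, &res) == 0)
+ *        printf("%ux%u, %u frame(s)\n", res.xres, res.yres,
+ *               res.num_frames);
+ */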
+
+#endif /* _ASM_POWERPC_PS3FB_H_ */
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..7004cfea3
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * This should only contain the volatile regs, since we can keep the
+ * non-volatile regs in the thread_struct; the entry code should set
+ * this up so that only the volatiles are saved by interrupt code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_PTRACE_H
+#define _UAPI_ASM_POWERPC_PTRACE_H
+
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef __KERNEL__
+struct user_pt_regs
+#else
+struct pt_regs
+#endif
+{
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3; /* Used for restarting system calls */
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef __powerpc64__
+ unsigned long softe; /* Soft enabled/disabled */
+#else
+ unsigned long mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+#endif
+ unsigned long trap; /* Reason for being here */
+ /* N.B. for critical exceptions on 4xx, the dar and dsisr
+ fields are overloaded to hold srr0 and srr1. */
+ unsigned long dar; /* Fault registers */
+ unsigned long dsisr; /* on 4xx/Book-E used for ESR */
+ unsigned long result; /* Result of a system call */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
+ */
+#define PT_R0 0
+#define PT_R1 1
+#define PT_R2 2
+#define PT_R3 3
+#define PT_R4 4
+#define PT_R5 5
+#define PT_R6 6
+#define PT_R7 7
+#define PT_R8 8
+#define PT_R9 9
+#define PT_R10 10
+#define PT_R11 11
+#define PT_R12 12
+#define PT_R13 13
+#define PT_R14 14
+#define PT_R15 15
+#define PT_R16 16
+#define PT_R17 17
+#define PT_R18 18
+#define PT_R19 19
+#define PT_R20 20
+#define PT_R21 21
+#define PT_R22 22
+#define PT_R23 23
+#define PT_R24 24
+#define PT_R25 25
+#define PT_R26 26
+#define PT_R27 27
+#define PT_R28 28
+#define PT_R29 29
+#define PT_R30 30
+#define PT_R31 31
+
+#define PT_NIP 32
+#define PT_MSR 33
+#define PT_ORIG_R3 34
+#define PT_CTR 35
+#define PT_LNK 36
+#define PT_XER 37
+#define PT_CCR 38
+#ifndef __powerpc64__
+#define PT_MQ 39
+#else
+#define PT_SOFTE 39
+#endif
+#define PT_TRAP 40
+#define PT_DAR 41
+#define PT_DSISR 42
+#define PT_RESULT 43
+#define PT_DSCR 44
+#define PT_REGS_COUNT 44
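+
+/*
+ * Illustrative use, not part of this header: with PTRACE_PEEKUSER the
+ * PT_* indexes are scaled by the register size to form the address
+ * argument, e.g. to read a tracee's NIP:
+ *
+ *    long nip = ptrace(PTRACE_PEEKUSER, pid,
+ *                      (void *)(PT_NIP * sizeof(long)), NULL);
+ */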
+
+#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
+
+#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
+
+
+#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
+#define PT_VSCR (PT_VR0 + 32*2 + 1)
+#define PT_VRSAVE (PT_VR0 + 33*2)
+
+
+/*
+ * Only the first 32 VSRs are stored here; the second 32 VSRs live in VR0-31.
+ */
+#define PT_VSR0 150 /* each VSR reg occupies 2 slots in 64-bit */
+#define PT_VSR31 (PT_VSR0 + 2*31)
+#endif /* __powerpc64__ */
+
+/*
+ * Get/set all the altivec registers v0..v31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+#define PTRACE_GETVRREGS 0x12
+#define PTRACE_SETVRREGS 0x13
+
+/* Get/set all the upper 32 bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS 0x14
+#define PTRACE_SETEVRREGS 0x15
+
+/* Get/set the first 32 128-bit VSX registers */
+#define PTRACE_GETVSRREGS 0x1b
+#define PTRACE_SETVSRREGS 0x1c
+
+/* Syscall emulation defines */
+#define PTRACE_SYSEMU 0x1d
+#define PTRACE_SYSEMU_SINGLESTEP 0x1e
+
+/*
+ * Get or set a debug register. The first 16 are DABR registers and the
+ * second 16 are IABR registers.
+ */
+#define PTRACE_GET_DEBUGREG 0x19
+#define PTRACE_SET_DEBUGREG 0x1a
+
+/* (new) PTRACE requests using the same numbers as x86 and the same
+ * argument ordering. Additionally, they support more registers.
+ */
+#define PTRACE_GETREGS 0xc
+#define PTRACE_SETREGS 0xd
+#define PTRACE_GETFPREGS 0xe
+#define PTRACE_SETFPREGS 0xf
+#define PTRACE_GETREGS64 0x16
+#define PTRACE_SETREGS64 0x17
+
+/* Calls to trace a 64-bit program from a 32-bit program */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95
+#define PPC_PTRACE_PEEKDATA_3264 0x94
+#define PPC_PTRACE_POKETEXT_3264 0x93
+#define PPC_PTRACE_POKEDATA_3264 0x92
+#define PPC_PTRACE_PEEKUSR_3264 0x91
+#define PPC_PTRACE_POKEUSR_3264 0x90
+
+#define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */
+
+#define PPC_PTRACE_GETHWDBGINFO 0x89
+#define PPC_PTRACE_SETHWDEBUG 0x88
+#define PPC_PTRACE_DELHWDEBUG 0x87
+
+#ifndef __ASSEMBLY__
+
+struct ppc_debug_info {
+ __u32 version; /* Only version 1 exists to date */
+ __u32 num_instruction_bps;
+ __u32 num_data_bps;
+ __u32 num_condition_regs;
+ __u32 data_bp_alignment;
+ __u32 sizeof_condition; /* size of the DVC register */
+ __u64 features;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * 'features' will have bits indicating whether there is support for:
+ */
+#define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x0000000000000001
+#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002
+#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
+#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
+#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010
+#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x0000000000000020
+
+#ifndef __ASSEMBLY__
+
+struct ppc_hw_breakpoint {
+ __u32 version; /* currently, version must be 1 */
+ __u32 trigger_type; /* only some combinations allowed */
+ __u32 addr_mode; /* address match mode */
+ __u32 condition_mode; /* break/watchpoint condition flags */
+ __u64 addr; /* break/watchpoint address */
+ __u64 addr2; /* range end or mask */
+ __u64 condition_value; /* contents of the DVC register */
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Trigger Type
+ */
+#define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x00000001
+#define PPC_BREAKPOINT_TRIGGER_READ 0x00000002
+#define PPC_BREAKPOINT_TRIGGER_WRITE 0x00000004
+#define PPC_BREAKPOINT_TRIGGER_RW \
+ (PPC_BREAKPOINT_TRIGGER_READ | PPC_BREAKPOINT_TRIGGER_WRITE)
+
+/*
+ * Address Mode
+ */
+#define PPC_BREAKPOINT_MODE_EXACT 0x00000000
+#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x00000001
+#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x00000002
+#define PPC_BREAKPOINT_MODE_MASK 0x00000003
+
+/*
+ * Condition Mode
+ */
+#define PPC_BREAKPOINT_CONDITION_MODE 0x00000003
+#define PPC_BREAKPOINT_CONDITION_NONE 0x00000000
+#define PPC_BREAKPOINT_CONDITION_AND 0x00000001
+#define PPC_BREAKPOINT_CONDITION_EXACT PPC_BREAKPOINT_CONDITION_AND
+#define PPC_BREAKPOINT_CONDITION_OR 0x00000002
+#define PPC_BREAKPOINT_CONDITION_AND_OR 0x00000003
+#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000
+#define PPC_BREAKPOINT_CONDITION_BE_SHIFT 16
+#define PPC_BREAKPOINT_CONDITION_BE(n) \
+ (1<<((n)+PPC_BREAKPOINT_CONDITION_BE_SHIFT))
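+
+/*
+ * Illustrative use, not part of this header: arming a simple write
+ * watchpoint with PPC_PTRACE_SETHWDEBUG might look like the sketch
+ * below; the returned slot number is what PPC_PTRACE_DELHWDEBUG later
+ * takes to remove the breakpoint ('watch_addr' is a caller-chosen
+ * address):
+ *
+ *    struct ppc_hw_breakpoint bp = {
+ *        .version        = 1,
+ *        .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
+ *        .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
+ *        .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
+ *        .addr           = (__u64)watch_addr,
+ *    };
+ *    int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, NULL, &bp);
+ */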
+
+#endif /* _UAPI_ASM_POWERPC_PTRACE_H */
diff --git a/arch/powerpc/include/uapi/asm/sembuf.h b/arch/powerpc/include/uapi/asm/sembuf.h
new file mode 100644
index 000000000..85e96ccb5
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/sembuf.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_SEMBUF_H
+#define _ASM_POWERPC_SEMBUF_H
+
+#include <asm/ipcbuf.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * The semid64_ds structure for PPC architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32/64-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#ifndef __powerpc64__
+ unsigned long sem_otime_high;
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_ctime_high;
+ unsigned long sem_ctime; /* last change time */
+#else
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
+#endif
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_POWERPC_SEMBUF_H */
diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h
new file mode 100644
index 000000000..c54940b09
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/setup.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/uapi/asm/shmbuf.h b/arch/powerpc/include/uapi/asm/shmbuf.h
new file mode 100644
index 000000000..439a3a02b
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/shmbuf.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_SHMBUF_H
+#define _ASM_POWERPC_SHMBUF_H
+
+#include <asm/ipcbuf.h>
+#include <asm/posix_types.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * The shmid64_ds structure for PPC architecture.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+#ifdef __powerpc64__
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
+#else
+ unsigned long shm_atime_high;
+ unsigned long shm_atime; /* last attach time */
+ unsigned long shm_dtime_high;
+ unsigned long shm_dtime; /* last detach time */
+ unsigned long shm_ctime_high;
+ unsigned long shm_ctime; /* last change time */
+ unsigned long __unused4;
+#endif
+ __kernel_size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused5;
+ unsigned long __unused6;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_POWERPC_SHMBUF_H */
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..630aeda56
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_SIGCONTEXT_H
+#define _ASM_POWERPC_SIGCONTEXT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#ifdef __powerpc64__
+#include <asm/elf.h>
+#endif
+
+struct sigcontext {
+ unsigned long _unused[4];
+ int signal;
+#ifdef __powerpc64__
+ int _pad0;
+#endif
+ unsigned long handler;
+ unsigned long oldmask;
+#ifdef __KERNEL__
+ struct user_pt_regs __user *regs;
+#else
+ struct pt_regs *regs;
+#endif
+#ifdef __powerpc64__
+ elf_gregset_t gp_regs;
+ elf_fpregset_t fp_regs;
+/*
+ * To maintain compatibility with current implementations the sigcontext is
+ * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
+ * followed by an unstructured (vmx_reserve) field of 101 doublewords. This
+ * allows the array of vector registers to be quadword aligned independent of
+ * the alignment of the containing sigcontext or ucontext. It is the
+ * responsibility of the code setting the sigcontext to set this pointer to
+ * either NULL (if this processor does not support the VMX feature) or the
+ * address of the first quadword within the allocated (vmx_reserve) area.
+ *
+ * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
+ * an array of 34 quadword entries (elf_vrregset_t). The entries with
+ * indexes 0-31 contain the corresponding vector registers. The entry with
+ * index 32 contains the vscr as the last word (offset 12) within the
+ * quadword. This allows the vscr to be stored as either a quadword (since
+ * it must be copied via a vector register to/from storage) or as a word.
+ * The entry with index 33 contains the vrsave as the first word (offset 0)
+ * within the quadword.
+ *
+ * Part of the VSX data is also stored here by extending vmx_reserve
+ * by an additional 32 double words. Architecturally the layout of
+ * the VSR registers and how they overlap on top of the legacy FPR and
+ * VR registers is shown below:
+ *
+ * VSR doubleword 0 VSR doubleword 1
+ * ----------------------------------------------------------------
+ * VSR[0] | FPR[0] | |
+ * ----------------------------------------------------------------
+ * VSR[1] | FPR[1] | |
+ * ----------------------------------------------------------------
+ * | ... | |
+ * | ... | |
+ * ----------------------------------------------------------------
+ * VSR[30] | FPR[30] | |
+ * ----------------------------------------------------------------
+ * VSR[31] | FPR[31] | |
+ * ----------------------------------------------------------------
+ * VSR[32] | VR[0] |
+ * ----------------------------------------------------------------
+ * VSR[33] | VR[1] |
+ * ----------------------------------------------------------------
+ * | ... |
+ * | ... |
+ * ----------------------------------------------------------------
+ * VSR[62] | VR[30] |
+ * ----------------------------------------------------------------
+ * VSR[63] | VR[31] |
+ * ----------------------------------------------------------------
+ *
+ * FPR/VSR 0-31 doubleword 0 is stored in fp_regs, and VMX/VSR 32-63
+ * is stored at the start of vmx_reserve. vmx_reserve is extended for
+ * backwards compatibility to store VSR 0-31 doubleword 1 after the VMX
+ * registers and vscr/vrsave.
+ */
+ elf_vrreg_t __user *v_regs;
+ long vmx_reserve[ELF_NVRREG + ELF_NVRREG + 1 + 32];
+#endif
+};
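+
+/*
+ * Illustrative check, not part of this header: per the comment above,
+ * 64-bit code inspecting a sigcontext must treat v_regs as optional:
+ *
+ *    if (sc->v_regs)
+ *        vr0 = sc->v_regs[0];    // VMX state was saved
+ */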
+
+#endif /* _ASM_POWERPC_SIGCONTEXT_H */
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
new file mode 100644
index 000000000..a5dfe84f5
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_POWERPC_SIGNAL_H
+#define _UAPI_ASM_POWERPC_SIGNAL_H
+
+#include <linux/types.h>
+
+#define _NSIG 64
+#ifdef __powerpc64__
+#define _NSIG_BPW 64
+#else
+#define _NSIG_BPW 32
+#endif
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+#define SA_RESTORER 0x04000000U
+
+#ifdef __powerpc64__
+#define MINSIGSTKSZ 8192
+#define SIGSTKSZ 32768
+#else
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+#endif
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+#endif
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ __kernel_size_t ss_size;
+} stack_t;
+
+
+#ifndef __powerpc64__
+/*
+ * These are parameters to the dbg_sigreturn syscall. They enable or
+ * disable certain debugging things that can be done from signal
+ * handlers. The dbg_sigreturn syscall *must* be called from a
+ * SA_SIGINFO signal so the ucontext can be passed to it. It takes an
+ * array of struct sig_dbg_op, which has the debug operations to
+ * perform before returning from the signal.
+ */
+struct sig_dbg_op {
+ int dbg_type;
+ unsigned long dbg_value;
+};
+
+/* Enable or disable single-stepping. The value sets the state. */
+#define SIG_DBG_SINGLE_STEPPING 1
+
+/* Enable or disable branch tracing. The value sets the state. */
+#define SIG_DBG_BRANCH_TRACING 2
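+
+/*
+ * Illustrative entry, not part of this header: an operation asking for
+ * single-stepping to be enabled would be encoded as
+ *
+ *    struct sig_dbg_op op = {
+ *        .dbg_type  = SIG_DBG_SINGLE_STEPPING,
+ *        .dbg_value = 1,    // the value sets the state: non-zero enables
+ *    };
+ */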
+#endif /* ! __powerpc64__ */
+
+#endif /* _UAPI_ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
new file mode 100644
index 000000000..12aa0c43e
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_SOCKET_H
+#define _ASM_POWERPC_SOCKET_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define SO_RCVLOWAT 16
+#define SO_SNDLOWAT 17
+#define SO_RCVTIMEO_OLD 18
+#define SO_SNDTIMEO_OLD 19
+#define SO_PASSCRED 20
+#define SO_PEERCRED 21
+
+#include <asm-generic/socket.h>
+
+#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/uapi/asm/spu_info.h b/arch/powerpc/include/uapi/asm/spu_info.h
new file mode 100644
index 000000000..45f971505
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/spu_info.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ */
+
+#ifndef _UAPI_SPU_INFO_H
+#define _UAPI_SPU_INFO_H
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+struct mfc_cq_sr {
+ __u64 mfc_cq_data0_RW;
+ __u64 mfc_cq_data1_RW;
+ __u64 mfc_cq_data2_RW;
+ __u64 mfc_cq_data3_RW;
+};
+#endif /* __KERNEL__ */
+
+struct spu_dma_info {
+ __u64 dma_info_type;
+ __u64 dma_info_mask;
+ __u64 dma_info_status;
+ __u64 dma_info_stall_and_notify;
+ __u64 dma_info_atomic_command_status;
+ struct mfc_cq_sr dma_info_command_data[16];
+};
+
+struct spu_proxydma_info {
+ __u64 proxydma_info_type;
+ __u64 proxydma_info_mask;
+ __u64 proxydma_info_status;
+ struct mfc_cq_sr proxydma_info_command_data[8];
+};
+
+#endif /* _UAPI_SPU_INFO_H */
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
new file mode 100644
index 000000000..d50901664
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_STAT_H
+#define _ASM_POWERPC_STAT_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+
+#define STAT_HAVE_NSEC 1
+
+#ifndef __powerpc64__
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+#endif /* !__powerpc64__ */
+
+struct stat {
+ unsigned long st_dev;
+ __kernel_ino_t st_ino;
+#ifdef __powerpc64__
+ unsigned long st_nlink;
+ __kernel_mode_t st_mode;
+#else
+ __kernel_mode_t st_mode;
+ unsigned short st_nlink;
+#endif
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
+ unsigned long st_rdev;
+ long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+#ifdef __powerpc64__
+ unsigned long __unused6;
+#endif
+};
+
+/* This matches struct stat64 in glibc 2.1. Only used for 32-bit. */
+struct stat64 {
+ unsigned long long st_dev; /* Device. */
+ unsigned long long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ unsigned short __pad2;
+ long long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ long long st_blocks; /* Number of 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#endif /* _ASM_POWERPC_STAT_H */
diff --git a/arch/powerpc/include/uapi/asm/swab.h b/arch/powerpc/include/uapi/asm/swab.h
new file mode 100644
index 000000000..17b16c44d
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/swab.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_SWAB_H
+#define _UAPI_ASM_POWERPC_SWAB_H
+
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+
+#endif /* __GNUC__ */
+
+#endif /* _UAPI_ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/uapi/asm/termbits.h b/arch/powerpc/include/uapi/asm/termbits.h
new file mode 100644
index 000000000..21dc86dcb
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/termbits.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_TERMBITS_H
+#define _ASM_POWERPC_TERMBITS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm-generic/termbits-common.h>
+
+typedef unsigned int tcflag_t;
+
+/*
+ * termios type and macro definitions. Be careful about adding stuff
+ * to this file since it's used in GNU libc and there are strict rules
+ * concerning namespace pollution.
+ */
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* For PowerPC the termios and ktermios are the same */
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VMIN 5
+#define VEOL 6
+#define VTIME 7
+#define VEOL2 8
+#define VSWTC 9
+#define VWERASE 10
+#define VREPRINT 11
+#define VSUSP 12
+#define VSTART 13
+#define VSTOP 14
+#define VLNEXT 15
+#define VDISCARD 16
+
+/* c_iflag bits */
+#define IXON 0x0200
+#define IXOFF 0x0400
+#define IUCLC 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
+
+/* c_oflag bits */
+#define ONLCR 0x00002
+#define OLCUC 0x00004
+#define NLDLY 0x00300
+#define NL0 0x00000
+#define NL1 0x00100
+#define NL2 0x00200
+#define NL3 0x00300
+#define TABDLY 0x00c00
+#define TAB0 0x00000
+#define TAB1 0x00400
+#define TAB2 0x00800
+#define TAB3 0x00c00
+#define XTABS 0x00c00 /* required by POSIX to == TAB3 */
+#define CRDLY 0x03000
+#define CR0 0x00000
+#define CR1 0x01000
+#define CR2 0x02000
+#define CR3 0x03000
+#define FFDLY 0x04000
+#define FF0 0x00000
+#define FF1 0x04000
+#define BSDLY 0x08000
+#define BS0 0x00000
+#define BS1 0x08000
+#define VTDLY 0x10000
+#define VT0 0x00000
+#define VT1 0x10000
+
+/* c_cflag bit meaning */
+#define CBAUD 0x000000ff
+#define CBAUDEX 0x00000000
+#define BOTHER 0x0000001f
+#define B57600 0x00000010
+#define B115200 0x00000011
+#define B230400 0x00000012
+#define B460800 0x00000013
+#define B500000 0x00000014
+#define B576000 0x00000015
+#define B921600 0x00000016
+#define B1000000 0x00000017
+#define B1152000 0x00000018
+#define B1500000 0x00000019
+#define B2000000 0x0000001a
+#define B2500000 0x0000001b
+#define B3000000 0x0000001c
+#define B3500000 0x0000001d
+#define B4000000 0x0000001e
+#define CSIZE 0x00000300
+#define CS5 0x00000000
+#define CS6 0x00000100
+#define CS7 0x00000200
+#define CS8 0x00000300
+#define CSTOPB 0x00000400
+#define CREAD 0x00000800
+#define PARENB 0x00001000
+#define PARODD 0x00002000
+#define HUPCL 0x00004000
+#define CLOCAL 0x00008000
+#define CIBAUD 0x00ff0000
+
+/* c_lflag bits */
+#define ISIG 0x00000080
+#define ICANON 0x00000100
+#define XCASE 0x00004000
+#define ECHO 0x00000008
+#define ECHOE 0x00000002
+#define ECHOK 0x00000004
+#define ECHONL 0x00000010
+#define NOFLSH 0x80000000
+#define TOSTOP 0x00400000
+#define ECHOCTL 0x00000040
+#define ECHOPRT 0x00000020
+#define ECHOKE 0x00000001
+#define FLUSHO 0x00800000
+#define PENDIN 0x20000000
+#define IEXTEN 0x00000400
+#define EXTPROC 0x10000000
+
+/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_POWERPC_TERMBITS_H */
diff --git a/arch/powerpc/include/uapi/asm/termios.h b/arch/powerpc/include/uapi/asm/termios.h
new file mode 100644
index 000000000..5d07fc89b
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/termios.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Liberally adapted from alpha/termios.h. In particular, the c_cc[]
+ * fields have been reordered so that termio & termios share the
+ * common subset in the same order (for brain dead programs that don't
+ * know or care about the differences).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_TERMIOS_H
+#define _UAPI_ASM_POWERPC_TERMIOS_H
+
+
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
+
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 10
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* c_cc characters */
+#define _VINTR 0
+#define _VQUIT 1
+#define _VERASE 2
+#define _VKILL 3
+#define _VEOF 4
+#define _VMIN 5
+#define _VEOL 6
+#define _VTIME 7
+#define _VEOL2 8
+#define _VSWTC 9
+
+
+
+#endif /* _UAPI_ASM_POWERPC_TERMIOS_H */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 000000000..e1bf0e2fa
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit 0 is copied to TEXASR[56] (IBM bit 7), which is set if
+ * the failure is persistent. PAPR reserves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */
+#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */
+#define TM_CAUSE_RESCHED 0xde
+#define TM_CAUSE_TLBI 0xdc
+#define TM_CAUSE_FAC_UNAV 0xda
+#define TM_CAUSE_SYSCALL 0xd8
+#define TM_CAUSE_MISC 0xd6 /* future use */
+#define TM_CAUSE_SIGNAL 0xd4
+#define TM_CAUSE_ALIGNMENT 0xd2
+#define TM_CAUSE_EMULATE 0xd0
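+
+/*
+ * Illustrative check, not part of this header: software that observes
+ * one of these cause codes can use the persistence convention above to
+ * decide whether retrying the transaction is worthwhile
+ * ('abandon_transaction' is a hypothetical caller-side helper):
+ *
+ *    if (cause & TM_CAUSE_PERSISTENT)
+ *        abandon_transaction();    // failure will repeat; don't retry
+ */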
+
+#endif /* _ASM_POWERPC_TM_H */
diff --git a/arch/powerpc/include/uapi/asm/types.h b/arch/powerpc/include/uapi/asm/types.h
new file mode 100644
index 000000000..327616fb7
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_TYPES_H
+#define _UAPI_ASM_POWERPC_TYPES_H
+
+/*
+ * This is here because we used to use l64 for 64-bit powerpc
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && defined(__powerpc64__) && !defined(__KERNEL__)
+# include <asm-generic/int-l64.h>
+#else
+# include <asm-generic/int-ll64.h>
+#endif
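+
+/*
+ * Illustrative opt-in, not part of this header: a 64-bit program that
+ * wants __u64 to be 'unsigned long long' (e.g. to match the %llu printf
+ * format) defines the macro before including kernel headers:
+ *
+ *    #define __SANE_USERSPACE_TYPES__
+ *    #include <linux/types.h>
+ */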
+
+#ifndef __ASSEMBLY__
+
+
+typedef struct {
+ __u32 u[4];
+} __attribute__((aligned(16))) __vector128;
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* _UAPI_ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/uapi/asm/ucontext.h b/arch/powerpc/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000..6f14a96d4
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/ucontext.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_UCONTEXT_H
+#define _ASM_POWERPC_UCONTEXT_H
+
+#ifdef __powerpc64__
+#include <asm/sigcontext.h>
+#else
+#include <asm/elf.h>
+#endif
+#include <asm/signal.h>
+
+#ifndef __powerpc64__
+struct mcontext {
+ elf_gregset_t mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned long mc_pad[2];
+ elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
+};
+#endif
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext __user *uc_link;
+ stack_t uc_stack;
+#ifndef __powerpc64__
+ int uc_pad[7];
+ struct mcontext __user *uc_regs;/* points to uc_mcontext field */
+#endif
+ sigset_t uc_sigmask;
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+#ifdef __powerpc64__
+ sigset_t __unused[15]; /* Allow for uc_sigmask growth */
+ struct sigcontext uc_mcontext; /* last for extensibility */
+#else
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext uc_mcontext;
+#endif
+};
+
+#endif /* _ASM_POWERPC_UCONTEXT_H */
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..5f84e3dc9
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
+#define _UAPI_ASM_POWERPC_UNISTD_H_
+
+#ifndef __powerpc64__
+#include <asm/unistd_32.h>
+#else
+#include <asm/unistd_64.h>
+#endif
+
+#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
new file mode 100644
index 000000000..7c81301ec
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 IBM Corp.
+ */
+
+#ifndef _UAPI_MISC_VAS_H
+#define _UAPI_MISC_VAS_H
+
+#include <linux/types.h>
+
+#include <asm/ioctl.h>
+
+#define VAS_MAGIC 'v'
+#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+
+/* Flags to VAS TX open window ioctl */
+/* To allocate a window with QoS credit, otherwise use default credit */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
+struct vas_tx_win_open_attr {
+ __u32 version;
+ __s16 vas_id; /* specific instance of vas or -1 for default */
+ __u16 reserved1;
+ __u64 flags;
+ __u64 reserved2[6];
+};
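+
+/*
+ * Illustrative use, not part of this header: opening a send window on
+ * an open VAS-capable device node might look like the sketch below
+ * ('fd' and the exact expected 'version' value are device specific
+ * assumptions):
+ *
+ *    struct vas_tx_win_open_attr attr = { 0 };
+ *
+ *    attr.version = 1;
+ *    attr.vas_id  = -1;    // use the default VAS instance
+ *    ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ */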
+
+#endif /* _UAPI_MISC_VAS_H */