From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- arch/s390/include/asm/Kbuild | 9 + arch/s390/include/asm/abs_lowcore.h | 27 + arch/s390/include/asm/airq.h | 110 ++ arch/s390/include/asm/alternative-asm.h | 56 + arch/s390/include/asm/alternative.h | 120 ++ arch/s390/include/asm/ap.h | 555 ++++++++ arch/s390/include/asm/appldata.h | 68 + arch/s390/include/asm/archrandom.h | 38 + arch/s390/include/asm/asm-const.h | 12 + arch/s390/include/asm/asm-extable.h | 92 ++ arch/s390/include/asm/asm-prototypes.h | 13 + arch/s390/include/asm/atomic.h | 150 +++ arch/s390/include/asm/atomic_ops.h | 201 +++ arch/s390/include/asm/barrier.h | 82 ++ arch/s390/include/asm/bitops.h | 378 ++++++ arch/s390/include/asm/boot_data.h | 18 + arch/s390/include/asm/bug.h | 71 + arch/s390/include/asm/cache.h | 19 + arch/s390/include/asm/ccwdev.h | 237 ++++ arch/s390/include/asm/ccwgroup.h | 75 ++ arch/s390/include/asm/checksum.h | 133 ++ arch/s390/include/asm/chpid.h | 51 + arch/s390/include/asm/chsc.h | 69 + arch/s390/include/asm/cio.h | 378 ++++++ arch/s390/include/asm/clocksource.h | 7 + arch/s390/include/asm/clp.h | 59 + arch/s390/include/asm/cmb.h | 14 + arch/s390/include/asm/cmpxchg.h | 207 +++ arch/s390/include/asm/compat.h | 140 ++ arch/s390/include/asm/cpacf.h | 551 ++++++++ arch/s390/include/asm/cpcmd.h | 32 + arch/s390/include/asm/cpu.h | 28 + arch/s390/include/asm/cpu_mf-insn.h | 22 + arch/s390/include/asm/cpu_mf.h | 277 ++++ arch/s390/include/asm/cpufeature.h | 23 + arch/s390/include/asm/cputime.h | 21 + arch/s390/include/asm/crw.h | 55 + arch/s390/include/asm/css_chars.h | 47 + arch/s390/include/asm/ctl_reg.h | 146 ++ arch/s390/include/asm/current.h | 19 + arch/s390/include/asm/debug.h | 490 +++++++ arch/s390/include/asm/delay.h | 24 + arch/s390/include/asm/diag.h | 351 +++++ arch/s390/include/asm/dis.h | 30 + arch/s390/include/asm/dma.h | 14 + arch/s390/include/asm/dwarf.h | 37 + arch/s390/include/asm/eadm.h | 120 ++ arch/s390/include/asm/ebcdic.h | 47 + arch/s390/include/asm/elf.h | 302 +++++ arch/s390/include/asm/entry-common.h | 63 + arch/s390/include/asm/exec.h | 13 + arch/s390/include/asm/extable.h | 72 + arch/s390/include/asm/extmem.h | 32 + arch/s390/include/asm/facility.h | 114 ++ arch/s390/include/asm/fcx.h | 312 +++++ arch/s390/include/asm/fpu/api.h | 119 ++ arch/s390/include/asm/fpu/internal.h | 62 + arch/s390/include/asm/fpu/types.h | 38 + arch/s390/include/asm/ftrace.h | 169 +++ arch/s390/include/asm/ftrace.lds.h | 21 + arch/s390/include/asm/futex.h | 81 ++ arch/s390/include/asm/gmap.h | 188 +++ arch/s390/include/asm/hardirq.h | 28 + arch/s390/include/asm/hugetlb.h | 147 ++ arch/s390/include/asm/hw_irq.h | 11 + arch/s390/include/asm/idals.h | 244 ++++ arch/s390/include/asm/idle.h | 27 + arch/s390/include/asm/io.h | 80 ++ arch/s390/include/asm/ipl.h | 170 +++ arch/s390/include/asm/irq.h | 123 ++ arch/s390/include/asm/irq_work.h | 12 + arch/s390/include/asm/irqflags.h | 78 ++ arch/s390/include/asm/isc.h | 31 + arch/s390/include/asm/itcw.h | 31 + arch/s390/include/asm/jump_label.h | 55 + arch/s390/include/asm/kasan.h | 18 + arch/s390/include/asm/kdebug.h | 28 + arch/s390/include/asm/kexec.h | 110 ++ arch/s390/include/asm/kfence.h | 42 + arch/s390/include/asm/kprobes.h | 81 ++ arch/s390/include/asm/kvm_host.h | 1060 +++++++++++++++ arch/s390/include/asm/kvm_para.h | 123 ++ arch/s390/include/asm/linkage.h | 10 + arch/s390/include/asm/lowcore.h | 
232 ++++ arch/s390/include/asm/maccess.h | 20 + arch/s390/include/asm/mem_encrypt.h | 12 + arch/s390/include/asm/mmu.h | 45 + arch/s390/include/asm/mmu_context.h | 128 ++ arch/s390/include/asm/mmzone.h | 17 + arch/s390/include/asm/module.h | 41 + arch/s390/include/asm/msi.h | 17 + arch/s390/include/asm/nmi.h | 108 ++ arch/s390/include/asm/nospec-branch.h | 22 + arch/s390/include/asm/nospec-insn.h | 132 ++ arch/s390/include/asm/numa.h | 25 + arch/s390/include/asm/os_info.h | 54 + arch/s390/include/asm/page-states.h | 21 + arch/s390/include/asm/page.h | 218 +++ arch/s390/include/asm/pai.h | 84 ++ arch/s390/include/asm/pci.h | 327 +++++ arch/s390/include/asm/pci_clp.h | 217 +++ arch/s390/include/asm/pci_debug.h | 30 + arch/s390/include/asm/pci_dma.h | 198 +++ arch/s390/include/asm/pci_insn.h | 159 +++ arch/s390/include/asm/pci_io.h | 206 +++ arch/s390/include/asm/percpu.h | 185 +++ arch/s390/include/asm/perf_event.h | 81 ++ arch/s390/include/asm/pfault.h | 26 + arch/s390/include/asm/pgalloc.h | 157 +++ arch/s390/include/asm/pgtable.h | 1872 ++++++++++++++++++++++++++ arch/s390/include/asm/physmem_info.h | 172 +++ arch/s390/include/asm/pkey.h | 28 + arch/s390/include/asm/pnet.h | 15 + arch/s390/include/asm/preempt.h | 139 ++ arch/s390/include/asm/processor.h | 380 ++++++ arch/s390/include/asm/ptdump.h | 14 + arch/s390/include/asm/ptrace.h | 263 ++++ arch/s390/include/asm/purgatory.h | 17 + arch/s390/include/asm/qdio.h | 364 +++++ arch/s390/include/asm/runtime_instr.h | 28 + arch/s390/include/asm/rwonce.h | 31 + arch/s390/include/asm/schid.h | 22 + arch/s390/include/asm/sclp.h | 164 +++ arch/s390/include/asm/scsw.h | 1050 +++++++++++++++ arch/s390/include/asm/seccomp.h | 28 + arch/s390/include/asm/sections.h | 29 + arch/s390/include/asm/set_memory.h | 66 + arch/s390/include/asm/setup.h | 161 +++ arch/s390/include/asm/signal.h | 26 + arch/s390/include/asm/sigp.h | 72 + arch/s390/include/asm/smp.h | 70 + arch/s390/include/asm/softirq_stack.h | 14 + arch/s390/include/asm/sparsemem.h | 8 + arch/s390/include/asm/spinlock.h | 148 ++ arch/s390/include/asm/spinlock_types.h | 22 + arch/s390/include/asm/stacktrace.h | 241 ++++ arch/s390/include/asm/stp.h | 98 ++ arch/s390/include/asm/string.h | 206 +++ arch/s390/include/asm/switch_to.h | 49 + arch/s390/include/asm/syscall.h | 154 +++ arch/s390/include/asm/syscall_wrapper.h | 140 ++ arch/s390/include/asm/sysinfo.h | 201 +++ arch/s390/include/asm/text-patching.h | 16 + arch/s390/include/asm/thread_info.h | 106 ++ arch/s390/include/asm/timex.h | 281 ++++ arch/s390/include/asm/tlb.h | 137 ++ arch/s390/include/asm/tlbflush.h | 129 ++ arch/s390/include/asm/topology.h | 101 ++ arch/s390/include/asm/tpi.h | 37 + arch/s390/include/asm/trace/diag.h | 44 + arch/s390/include/asm/trace/zcrypt.h | 123 ++ arch/s390/include/asm/types.h | 19 + arch/s390/include/asm/uaccess.h | 601 +++++++++ arch/s390/include/asm/unistd.h | 40 + arch/s390/include/asm/unwind.h | 98 ++ arch/s390/include/asm/uprobes.h | 33 + arch/s390/include/asm/user.h | 71 + arch/s390/include/asm/uv.h | 517 +++++++ arch/s390/include/asm/vdso.h | 34 + arch/s390/include/asm/vdso/clocksource.h | 8 + arch/s390/include/asm/vdso/data.h | 13 + arch/s390/include/asm/vdso/gettimeofday.h | 63 + arch/s390/include/asm/vdso/processor.h | 7 + arch/s390/include/asm/vdso/vsyscall.h | 26 + arch/s390/include/asm/vmalloc.h | 4 + arch/s390/include/asm/vmlinux.lds.h | 33 + arch/s390/include/asm/vtime.h | 21 + arch/s390/include/asm/vtimer.h | 30 + arch/s390/include/asm/vx-insn-asm.h | 681 ++++++++++ 
arch/s390/include/asm/vx-insn.h | 19 + arch/s390/include/asm/xor.h | 21 + arch/s390/include/uapi/asm/Kbuild | 4 + arch/s390/include/uapi/asm/auxvec.h | 9 + arch/s390/include/uapi/asm/bitsperlong.h | 14 + arch/s390/include/uapi/asm/bpf_perf_event.h | 9 + arch/s390/include/uapi/asm/byteorder.h | 7 + arch/s390/include/uapi/asm/chpid.h | 23 + arch/s390/include/uapi/asm/chsc.h | 144 ++ arch/s390/include/uapi/asm/clp.h | 29 + arch/s390/include/uapi/asm/cmb.h | 54 + arch/s390/include/uapi/asm/dasd.h | 354 +++++ arch/s390/include/uapi/asm/fs3270.h | 25 + arch/s390/include/uapi/asm/guarded_storage.h | 78 ++ arch/s390/include/uapi/asm/hwctrset.h | 51 + arch/s390/include/uapi/asm/hypfs.h | 55 + arch/s390/include/uapi/asm/ioctls.h | 9 + arch/s390/include/uapi/asm/ipcbuf.h | 34 + arch/s390/include/uapi/asm/ipl.h | 208 +++ arch/s390/include/uapi/asm/kvm.h | 308 +++++ arch/s390/include/uapi/asm/kvm_para.h | 8 + arch/s390/include/uapi/asm/kvm_perf.h | 22 + arch/s390/include/uapi/asm/monwriter.h | 32 + arch/s390/include/uapi/asm/perf_regs.h | 44 + arch/s390/include/uapi/asm/pkey.h | 452 +++++++ arch/s390/include/uapi/asm/posix_types.h | 58 + arch/s390/include/uapi/asm/ptrace.h | 455 +++++++ arch/s390/include/uapi/asm/qeth.h | 116 ++ arch/s390/include/uapi/asm/raw3270.h | 75 ++ arch/s390/include/uapi/asm/runtime_instr.h | 74 + arch/s390/include/uapi/asm/schid.h | 20 + arch/s390/include/uapi/asm/sclp_ctl.h | 25 + arch/s390/include/uapi/asm/setup.h | 1 + arch/s390/include/uapi/asm/sie.h | 252 ++++ arch/s390/include/uapi/asm/sigcontext.h | 85 ++ arch/s390/include/uapi/asm/signal.h | 115 ++ arch/s390/include/uapi/asm/stat.h | 104 ++ arch/s390/include/uapi/asm/statfs.h | 51 + arch/s390/include/uapi/asm/sthyi.h | 7 + arch/s390/include/uapi/asm/tape390.h | 103 ++ arch/s390/include/uapi/asm/types.h | 30 + arch/s390/include/uapi/asm/ucontext.h | 41 + arch/s390/include/uapi/asm/unistd.h | 17 + arch/s390/include/uapi/asm/uvdevice.h | 102 ++ arch/s390/include/uapi/asm/virtio-ccw.h | 18 + arch/s390/include/uapi/asm/vmcp.h | 25 + arch/s390/include/uapi/asm/vtoc.h | 214 +++ arch/s390/include/uapi/asm/zcrypt.h | 373 +++++ 217 files changed, 26519 insertions(+) create mode 100644 arch/s390/include/asm/Kbuild create mode 100644 arch/s390/include/asm/abs_lowcore.h create mode 100644 arch/s390/include/asm/airq.h create mode 100644 arch/s390/include/asm/alternative-asm.h create mode 100644 arch/s390/include/asm/alternative.h create mode 100644 arch/s390/include/asm/ap.h create mode 100644 arch/s390/include/asm/appldata.h create mode 100644 arch/s390/include/asm/archrandom.h create mode 100644 arch/s390/include/asm/asm-const.h create mode 100644 arch/s390/include/asm/asm-extable.h create mode 100644 arch/s390/include/asm/asm-prototypes.h create mode 100644 arch/s390/include/asm/atomic.h create mode 100644 arch/s390/include/asm/atomic_ops.h create mode 100644 arch/s390/include/asm/barrier.h create mode 100644 arch/s390/include/asm/bitops.h create mode 100644 arch/s390/include/asm/boot_data.h create mode 100644 arch/s390/include/asm/bug.h create mode 100644 arch/s390/include/asm/cache.h create mode 100644 arch/s390/include/asm/ccwdev.h create mode 100644 arch/s390/include/asm/ccwgroup.h create mode 100644 arch/s390/include/asm/checksum.h create mode 100644 arch/s390/include/asm/chpid.h create mode 100644 arch/s390/include/asm/chsc.h create mode 100644 arch/s390/include/asm/cio.h create mode 100644 arch/s390/include/asm/clocksource.h create mode 100644 arch/s390/include/asm/clp.h create mode 100644 arch/s390/include/asm/cmb.h 
create mode 100644 arch/s390/include/asm/cmpxchg.h create mode 100644 arch/s390/include/asm/compat.h create mode 100644 arch/s390/include/asm/cpacf.h create mode 100644 arch/s390/include/asm/cpcmd.h create mode 100644 arch/s390/include/asm/cpu.h create mode 100644 arch/s390/include/asm/cpu_mf-insn.h create mode 100644 arch/s390/include/asm/cpu_mf.h create mode 100644 arch/s390/include/asm/cpufeature.h create mode 100644 arch/s390/include/asm/cputime.h create mode 100644 arch/s390/include/asm/crw.h create mode 100644 arch/s390/include/asm/css_chars.h create mode 100644 arch/s390/include/asm/ctl_reg.h create mode 100644 arch/s390/include/asm/current.h create mode 100644 arch/s390/include/asm/debug.h create mode 100644 arch/s390/include/asm/delay.h create mode 100644 arch/s390/include/asm/diag.h create mode 100644 arch/s390/include/asm/dis.h create mode 100644 arch/s390/include/asm/dma.h create mode 100644 arch/s390/include/asm/dwarf.h create mode 100644 arch/s390/include/asm/eadm.h create mode 100644 arch/s390/include/asm/ebcdic.h create mode 100644 arch/s390/include/asm/elf.h create mode 100644 arch/s390/include/asm/entry-common.h create mode 100644 arch/s390/include/asm/exec.h create mode 100644 arch/s390/include/asm/extable.h create mode 100644 arch/s390/include/asm/extmem.h create mode 100644 arch/s390/include/asm/facility.h create mode 100644 arch/s390/include/asm/fcx.h create mode 100644 arch/s390/include/asm/fpu/api.h create mode 100644 arch/s390/include/asm/fpu/internal.h create mode 100644 arch/s390/include/asm/fpu/types.h create mode 100644 arch/s390/include/asm/ftrace.h create mode 100644 arch/s390/include/asm/ftrace.lds.h create mode 100644 arch/s390/include/asm/futex.h create mode 100644 arch/s390/include/asm/gmap.h create mode 100644 arch/s390/include/asm/hardirq.h create mode 100644 arch/s390/include/asm/hugetlb.h create mode 100644 arch/s390/include/asm/hw_irq.h create mode 100644 arch/s390/include/asm/idals.h create mode 100644 arch/s390/include/asm/idle.h create mode 100644 arch/s390/include/asm/io.h create mode 100644 arch/s390/include/asm/ipl.h create mode 100644 arch/s390/include/asm/irq.h create mode 100644 arch/s390/include/asm/irq_work.h create mode 100644 arch/s390/include/asm/irqflags.h create mode 100644 arch/s390/include/asm/isc.h create mode 100644 arch/s390/include/asm/itcw.h create mode 100644 arch/s390/include/asm/jump_label.h create mode 100644 arch/s390/include/asm/kasan.h create mode 100644 arch/s390/include/asm/kdebug.h create mode 100644 arch/s390/include/asm/kexec.h create mode 100644 arch/s390/include/asm/kfence.h create mode 100644 arch/s390/include/asm/kprobes.h create mode 100644 arch/s390/include/asm/kvm_host.h create mode 100644 arch/s390/include/asm/kvm_para.h create mode 100644 arch/s390/include/asm/linkage.h create mode 100644 arch/s390/include/asm/lowcore.h create mode 100644 arch/s390/include/asm/maccess.h create mode 100644 arch/s390/include/asm/mem_encrypt.h create mode 100644 arch/s390/include/asm/mmu.h create mode 100644 arch/s390/include/asm/mmu_context.h create mode 100644 arch/s390/include/asm/mmzone.h create mode 100644 arch/s390/include/asm/module.h create mode 100644 arch/s390/include/asm/msi.h create mode 100644 arch/s390/include/asm/nmi.h create mode 100644 arch/s390/include/asm/nospec-branch.h create mode 100644 arch/s390/include/asm/nospec-insn.h create mode 100644 arch/s390/include/asm/numa.h create mode 100644 arch/s390/include/asm/os_info.h create mode 100644 arch/s390/include/asm/page-states.h create mode 100644 
arch/s390/include/asm/page.h create mode 100644 arch/s390/include/asm/pai.h create mode 100644 arch/s390/include/asm/pci.h create mode 100644 arch/s390/include/asm/pci_clp.h create mode 100644 arch/s390/include/asm/pci_debug.h create mode 100644 arch/s390/include/asm/pci_dma.h create mode 100644 arch/s390/include/asm/pci_insn.h create mode 100644 arch/s390/include/asm/pci_io.h create mode 100644 arch/s390/include/asm/percpu.h create mode 100644 arch/s390/include/asm/perf_event.h create mode 100644 arch/s390/include/asm/pfault.h create mode 100644 arch/s390/include/asm/pgalloc.h create mode 100644 arch/s390/include/asm/pgtable.h create mode 100644 arch/s390/include/asm/physmem_info.h create mode 100644 arch/s390/include/asm/pkey.h create mode 100644 arch/s390/include/asm/pnet.h create mode 100644 arch/s390/include/asm/preempt.h create mode 100644 arch/s390/include/asm/processor.h create mode 100644 arch/s390/include/asm/ptdump.h create mode 100644 arch/s390/include/asm/ptrace.h create mode 100644 arch/s390/include/asm/purgatory.h create mode 100644 arch/s390/include/asm/qdio.h create mode 100644 arch/s390/include/asm/runtime_instr.h create mode 100644 arch/s390/include/asm/rwonce.h create mode 100644 arch/s390/include/asm/schid.h create mode 100644 arch/s390/include/asm/sclp.h create mode 100644 arch/s390/include/asm/scsw.h create mode 100644 arch/s390/include/asm/seccomp.h create mode 100644 arch/s390/include/asm/sections.h create mode 100644 arch/s390/include/asm/set_memory.h create mode 100644 arch/s390/include/asm/setup.h create mode 100644 arch/s390/include/asm/signal.h create mode 100644 arch/s390/include/asm/sigp.h create mode 100644 arch/s390/include/asm/smp.h create mode 100644 arch/s390/include/asm/softirq_stack.h create mode 100644 arch/s390/include/asm/sparsemem.h create mode 100644 arch/s390/include/asm/spinlock.h create mode 100644 arch/s390/include/asm/spinlock_types.h create mode 100644 arch/s390/include/asm/stacktrace.h create mode 100644 arch/s390/include/asm/stp.h create mode 100644 arch/s390/include/asm/string.h create mode 100644 arch/s390/include/asm/switch_to.h create mode 100644 arch/s390/include/asm/syscall.h create mode 100644 arch/s390/include/asm/syscall_wrapper.h create mode 100644 arch/s390/include/asm/sysinfo.h create mode 100644 arch/s390/include/asm/text-patching.h create mode 100644 arch/s390/include/asm/thread_info.h create mode 100644 arch/s390/include/asm/timex.h create mode 100644 arch/s390/include/asm/tlb.h create mode 100644 arch/s390/include/asm/tlbflush.h create mode 100644 arch/s390/include/asm/topology.h create mode 100644 arch/s390/include/asm/tpi.h create mode 100644 arch/s390/include/asm/trace/diag.h create mode 100644 arch/s390/include/asm/trace/zcrypt.h create mode 100644 arch/s390/include/asm/types.h create mode 100644 arch/s390/include/asm/uaccess.h create mode 100644 arch/s390/include/asm/unistd.h create mode 100644 arch/s390/include/asm/unwind.h create mode 100644 arch/s390/include/asm/uprobes.h create mode 100644 arch/s390/include/asm/user.h create mode 100644 arch/s390/include/asm/uv.h create mode 100644 arch/s390/include/asm/vdso.h create mode 100644 arch/s390/include/asm/vdso/clocksource.h create mode 100644 arch/s390/include/asm/vdso/data.h create mode 100644 arch/s390/include/asm/vdso/gettimeofday.h create mode 100644 arch/s390/include/asm/vdso/processor.h create mode 100644 arch/s390/include/asm/vdso/vsyscall.h create mode 100644 arch/s390/include/asm/vmalloc.h create mode 100644 arch/s390/include/asm/vmlinux.lds.h create mode 
100644 arch/s390/include/asm/vtime.h create mode 100644 arch/s390/include/asm/vtimer.h create mode 100644 arch/s390/include/asm/vx-insn-asm.h create mode 100644 arch/s390/include/asm/vx-insn.h create mode 100644 arch/s390/include/asm/xor.h create mode 100644 arch/s390/include/uapi/asm/Kbuild create mode 100644 arch/s390/include/uapi/asm/auxvec.h create mode 100644 arch/s390/include/uapi/asm/bitsperlong.h create mode 100644 arch/s390/include/uapi/asm/bpf_perf_event.h create mode 100644 arch/s390/include/uapi/asm/byteorder.h create mode 100644 arch/s390/include/uapi/asm/chpid.h create mode 100644 arch/s390/include/uapi/asm/chsc.h create mode 100644 arch/s390/include/uapi/asm/clp.h create mode 100644 arch/s390/include/uapi/asm/cmb.h create mode 100644 arch/s390/include/uapi/asm/dasd.h create mode 100644 arch/s390/include/uapi/asm/fs3270.h create mode 100644 arch/s390/include/uapi/asm/guarded_storage.h create mode 100644 arch/s390/include/uapi/asm/hwctrset.h create mode 100644 arch/s390/include/uapi/asm/hypfs.h create mode 100644 arch/s390/include/uapi/asm/ioctls.h create mode 100644 arch/s390/include/uapi/asm/ipcbuf.h create mode 100644 arch/s390/include/uapi/asm/ipl.h create mode 100644 arch/s390/include/uapi/asm/kvm.h create mode 100644 arch/s390/include/uapi/asm/kvm_para.h create mode 100644 arch/s390/include/uapi/asm/kvm_perf.h create mode 100644 arch/s390/include/uapi/asm/monwriter.h create mode 100644 arch/s390/include/uapi/asm/perf_regs.h create mode 100644 arch/s390/include/uapi/asm/pkey.h create mode 100644 arch/s390/include/uapi/asm/posix_types.h create mode 100644 arch/s390/include/uapi/asm/ptrace.h create mode 100644 arch/s390/include/uapi/asm/qeth.h create mode 100644 arch/s390/include/uapi/asm/raw3270.h create mode 100644 arch/s390/include/uapi/asm/runtime_instr.h create mode 100644 arch/s390/include/uapi/asm/schid.h create mode 100644 arch/s390/include/uapi/asm/sclp_ctl.h create mode 100644 arch/s390/include/uapi/asm/setup.h create mode 100644 arch/s390/include/uapi/asm/sie.h create mode 100644 arch/s390/include/uapi/asm/sigcontext.h create mode 100644 arch/s390/include/uapi/asm/signal.h create mode 100644 arch/s390/include/uapi/asm/stat.h create mode 100644 arch/s390/include/uapi/asm/statfs.h create mode 100644 arch/s390/include/uapi/asm/sthyi.h create mode 100644 arch/s390/include/uapi/asm/tape390.h create mode 100644 arch/s390/include/uapi/asm/types.h create mode 100644 arch/s390/include/uapi/asm/ucontext.h create mode 100644 arch/s390/include/uapi/asm/unistd.h create mode 100644 arch/s390/include/uapi/asm/uvdevice.h create mode 100644 arch/s390/include/uapi/asm/virtio-ccw.h create mode 100644 arch/s390/include/uapi/asm/vmcp.h create mode 100644 arch/s390/include/uapi/asm/vtoc.h create mode 100644 arch/s390/include/uapi/asm/zcrypt.h (limited to 'arch/s390/include') diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild new file mode 100644 index 000000000..4b904110d --- /dev/null +++ b/arch/s390/include/asm/Kbuild @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +generated-y += dis-defs.h +generated-y += facility-defs.h +generated-y += syscall_table.h +generated-y += unistd_nr.h + +generic-y += asm-offsets.h +generic-y += kvm_types.h +generic-y += mcs_spinlock.h diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h new file mode 100644 index 000000000..6f264b79e --- /dev/null +++ b/arch/s390/include/asm/abs_lowcore.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ABS_LOWCORE_H +#define 
_ASM_S390_ABS_LOWCORE_H + +#include + +#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore)) + +extern unsigned long __abs_lowcore; + +int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc); +void abs_lowcore_unmap(int cpu); + +static inline struct lowcore *get_abs_lowcore(void) +{ + int cpu; + + cpu = get_cpu(); + return ((struct lowcore *)__abs_lowcore) + cpu; +} + +static inline void put_abs_lowcore(struct lowcore *lc) +{ + put_cpu(); +} + +#endif /* _ASM_S390_ABS_LOWCORE_H */ diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h new file mode 100644 index 000000000..c4c28c260 --- /dev/null +++ b/arch/s390/include/asm/airq.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2002, 2007 + * Author(s): Ingo Adlung + * Cornelia Huck + * Arnd Bergmann + * Peter Oberparleiter + */ + +#ifndef _ASM_S390_AIRQ_H +#define _ASM_S390_AIRQ_H + +#include +#include +#include + +struct airq_struct { + struct hlist_node list; /* Handler queueing. */ + void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info); + u8 *lsi_ptr; /* Local-Summary-Indicator pointer */ + u8 isc; /* Interrupt-subclass */ + u8 flags; +}; + +#define AIRQ_PTR_ALLOCATED 0x01 + +int register_adapter_interrupt(struct airq_struct *airq); +void unregister_adapter_interrupt(struct airq_struct *airq); + +/* Adapter interrupt bit vector */ +struct airq_iv { + unsigned long *vector; /* Adapter interrupt bit vector */ + dma_addr_t vector_dma; /* Adapter interrupt bit vector dma */ + unsigned long *avail; /* Allocation bit mask for the bit vector */ + unsigned long *bitlock; /* Lock bit mask for the bit vector */ + unsigned long *ptr; /* Pointer associated with each bit */ + unsigned int *data; /* 32 bit value associated with each bit */ + unsigned long bits; /* Number of bits in the vector */ + unsigned long end; /* Number of highest allocated bit + 1 */ + unsigned long flags; /* Allocation flags */ + spinlock_t lock; /* Lock to protect alloc & free */ +}; + +#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */ +#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */ +#define AIRQ_IV_PTR 4 /* Allocate the ptr array */ +#define AIRQ_IV_DATA 8 /* Allocate the data array */ +#define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */ +#define AIRQ_IV_GUESTVEC 32 /* Vector is a pinned guest page */ + +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags, + unsigned long *vec); +void airq_iv_release(struct airq_iv *iv); +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num); +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num); +unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, + unsigned long end); + +static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv) +{ + return airq_iv_alloc(iv, 1); +} + +static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit) +{ + airq_iv_free(iv, bit, 1); +} + +static inline unsigned long airq_iv_end(struct airq_iv *iv) +{ + return iv->end; +} + +static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit) +{ + const unsigned long be_to_le = BITS_PER_LONG - 1; + bit_spin_lock(bit ^ be_to_le, iv->bitlock); +} + +static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit) +{ + const unsigned long be_to_le = BITS_PER_LONG - 1; + bit_spin_unlock(bit ^ be_to_le, iv->bitlock); +} + +static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit, + unsigned int data) +{ + iv->data[bit] = data; +} 
+ +static inline unsigned int airq_iv_get_data(struct airq_iv *iv, + unsigned long bit) +{ + return iv->data[bit]; +} + +static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit, + unsigned long ptr) +{ + iv->ptr[bit] = ptr; +} + +static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv, + unsigned long bit) +{ + return iv->ptr[bit]; +} + +#endif /* _ASM_S390_AIRQ_H */ diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h new file mode 100644 index 000000000..7db046596 --- /dev/null +++ b/arch/s390/include/asm/alternative-asm.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ALTERNATIVE_ASM_H +#define _ASM_S390_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature + .long \orig_start - . + .long \alt_start - . + .word \feature + .byte \orig_end - \orig_start + .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start ) + .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start ) +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature + .pushsection .altinstr_replacement,"ax" +770: \newinstr +771: .popsection +772: \oldinstr +773: .pushsection .altinstructions,"a" + alt_entry 772b, 773b, 770b, 771b, \feature + .popsection +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. 
+ */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + .pushsection .altinstr_replacement,"ax" +770: \newinstr1 +771: \newinstr2 +772: .popsection +773: \oldinstr +774: .pushsection .altinstructions,"a" + alt_entry 773b, 774b, 770b, 771b,\feature1 + alt_entry 773b, 774b, 771b, 772b,\feature2 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_ALTERNATIVE_ASM_H */ diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h new file mode 100644 index 000000000..904dd049f --- /dev/null +++ b/arch/s390/include/asm/alternative.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ALTERNATIVE_H +#define _ASM_S390_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +struct alt_instr { + s32 instr_offset; /* original instruction */ + s32 repl_offset; /* offset to replacement instruction */ + u16 facility; /* facility bit set for replacement */ + u8 instrlen; /* length of original instruction */ +} __packed; + +void apply_alternative_instructions(void); +void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +/* + * +---------------------------------+ + * |661: |662: + * | oldinstr | + * +---------------------------------+ + * + * .altinstr_replacement section + * +---------------------------------+ + * |6641: |6651: + * | alternative instr 1 | + * +---------------------------------+ + * |6642: |6652: + * | alternative instr 2 | + * +---------------------------------+ + * + * .altinstructions section + * +---------------------------------+ + * | alt_instr entries for each | + * | alternative instr | + * +---------------------------------+ + */ + +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num +#define oldinstr_len "662b-661b" +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" + +#define OLDINSTR(oldinstr) \ + "661:\n\t" oldinstr "\n662:\n" + +#define ALTINSTR_ENTRY(facility, num) \ + "\t.long 661b - .\n" /* old instruction */ \ + "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ + "\t.word " __stringify(facility) "\n" /* facility bit */ \ + "\t.byte " oldinstr_len "\n" /* instruction len */ \ + "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \ + "\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n" + +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, altinstr, facility) \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr, 1) \ + ".popsection\n" \ + OLDINSTR(oldinstr) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr1, 1) \ + ALTINSTR_REPLACEMENT(altinstr2, 2) \ + ".popsection\n" \ + OLDINSTR(oldinstr) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * oldinstr is padded with jump and nops at compile time if altinstr is + * longer. altinstr is padded with jump and nops at run-time during patching. 
+ * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, altinstr, facility) \ + asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory") + +#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \ + asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ + altinstr2, facility2) ::: "memory") + +/* Alternative inline assembly with input. */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ + : : input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, altinstr, facility, output, input...) \ + asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \ + : output : input) + +/* Use this macro if more than one output parameter is needed. */ +#define ASM_OUTPUT2(a...) a + +/* Use this macro if clobbers are needed without inputs. */ +#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_ALTERNATIVE_H */ diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h new file mode 100644 index 000000000..40c2b82f0 --- /dev/null +++ b/arch/s390/include/asm/ap.h @@ -0,0 +1,555 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Adjunct processor (AP) interfaces + * + * Copyright IBM Corp. 2017 + * + * Author(s): Tony Krowiak + * Martin Schwidefsky + * Harald Freudenberger + */ + +#ifndef _ASM_S390_AP_H_ +#define _ASM_S390_AP_H_ + +#include +#include + +/** + * The ap_qid_t identifier of an ap queue. + * If the AP facilities test (APFT) facility is available, + * card and queue index are 8 bit values, otherwise + * card index is 6 bit and queue index a 4 bit value. + */ +typedef unsigned int ap_qid_t; + +#define AP_MKQID(_card, _queue) (((_card) & 0xff) << 8 | ((_queue) & 0xff)) +#define AP_QID_CARD(_qid) (((_qid) >> 8) & 0xff) +#define AP_QID_QUEUE(_qid) ((_qid) & 0xff) + +/** + * struct ap_queue_status - Holds the AP queue status. + * @queue_empty: Shows if queue is empty + * @replies_waiting: Waiting replies + * @queue_full: Is 1 if the queue is full + * @irq_enabled: Shows if interrupts are enabled for the AP + * @response_code: Holds the 8 bit response code + * + * The ap queue status word is returned by all three AP functions + * (PQAP, NQAP and DQAP). There's a set of flags in the first + * byte, followed by a 1 byte response code. + */ +struct ap_queue_status { + unsigned int queue_empty : 1; + unsigned int replies_waiting : 1; + unsigned int queue_full : 1; + unsigned int : 3; + unsigned int async : 1; + unsigned int irq_enabled : 1; + unsigned int response_code : 8; + unsigned int : 16; +}; + +/* + * AP queue status reg union to access the reg1 + * register with the lower 32 bits comprising the + * ap queue status. + */ +union ap_queue_status_reg { + unsigned long value; + struct { + u32 _pad; + struct ap_queue_status status; + }; +}; + +/** + * ap_intructions_available() - Test if AP instructions are available. + * + * Returns true if the AP instructions are installed, otherwise false. 
+ */ +static inline bool ap_instructions_available(void) +{ + unsigned long reg0 = AP_MKQID(0, 0); + unsigned long reg1 = 0; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid into gr0 */ + " lghi 1,0\n" /* 0 into gr1 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + "0: la %[reg1],1\n" /* 1 into reg1 */ + "1:\n" + EX_TABLE(0b, 1b) + : [reg1] "+&d" (reg1) + : [reg0] "d" (reg0) + : "cc", "0", "1", "2"); + return reg1 != 0; +} + +/* TAPQ register GR2 response struct */ +struct ap_tapq_gr2 { + union { + unsigned long value; + struct { + unsigned int fac : 32; /* facility bits */ + unsigned int apinfo : 32; /* ap type, ... */ + }; + struct { + unsigned int s : 1; /* APSC */ + unsigned int m : 1; /* AP4KM */ + unsigned int c : 1; /* AP4KC */ + unsigned int mode : 3; + unsigned int n : 1; /* APXA */ + unsigned int : 1; + unsigned int class : 8; + unsigned int bs : 2; /* SE bind/assoc */ + unsigned int : 14; + unsigned int at : 8; /* ap type */ + unsigned int nd : 8; /* nr of domains */ + unsigned int : 4; + unsigned int ml : 4; /* apxl ml */ + unsigned int : 4; + unsigned int qd : 4; /* queue depth */ + }; + }; +}; + +/* + * Convenience defines to be used with the bs field from struct ap_tapq_gr2 + */ +#define AP_BS_Q_USABLE 0 +#define AP_BS_Q_USABLE_NO_SECURE_KEY 1 +#define AP_BS_Q_AVAIL_FOR_BINDING 2 +#define AP_BS_Q_UNUSABLE 3 + +/** + * ap_tapq(): Test adjunct processor queue. + * @qid: The AP queue number + * @info: Pointer to queue descriptor + * + * Returns AP queue status structure. + */ +static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info) +{ + union ap_queue_status_reg reg1; + unsigned long reg2; + + asm volatile( + " lgr 0,%[qid]\n" /* qid into gr0 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* gr2 into reg2 */ + : [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2) + : [qid] "d" (qid) + : "cc", "0", "1", "2"); + if (info) + info->value = reg2; + return reg1.status; +} + +/** + * ap_test_queue(): Test adjunct processor queue. + * @qid: The AP queue number + * @tbit: Test facilities bit + * @info: Ptr to tapq gr2 struct + * + * Returns AP queue status structure. + */ +static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit, + struct ap_tapq_gr2 *info) +{ + if (tbit) + qid |= 1UL << 23; /* set T bit*/ + return ap_tapq(qid, info); +} + +/** + * ap_pqap_rapq(): Reset adjunct processor queue. + * @qid: The AP queue number + * @fbit: if != 0 set F bit + * + * Returns AP queue status structure. + */ +static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit) +{ + unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ + union ap_queue_status_reg reg1; + + if (fbit) + reg0 |= 1UL << 22; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg1] "=&d" (reg1.value) + : [reg0] "d" (reg0) + : "cc", "0", "1"); + return reg1.status; +} + +/** + * ap_pqap_zapq(): Reset and zeroize adjunct processor queue. + * @qid: The AP queue number + * @fbit: if != 0 set F bit + * + * Returns AP queue status structure. 
+ */ +static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit) +{ + unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */ + union ap_queue_status_reg reg1; + + if (fbit) + reg0 |= 1UL << 22; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg1] "=&d" (reg1.value) + : [reg0] "d" (reg0) + : "cc", "0", "1"); + return reg1.status; +} + +/** + * struct ap_config_info - convenience struct for AP crypto + * config info as returned by the ap_qci() function. + */ +struct ap_config_info { + unsigned int apsc : 1; /* S bit */ + unsigned int apxa : 1; /* N bit */ + unsigned int qact : 1; /* C bit */ + unsigned int rc8a : 1; /* R bit */ + unsigned int : 4; + unsigned int apsb : 1; /* B bit */ + unsigned int : 23; + unsigned char na; /* max # of APs - 1 */ + unsigned char nd; /* max # of Domains - 1 */ + unsigned char _reserved0[10]; + unsigned int apm[8]; /* AP ID mask */ + unsigned int aqm[8]; /* AP (usage) queue mask */ + unsigned int adm[8]; /* AP (control) domain mask */ + unsigned char _reserved1[16]; +} __aligned(8); + +/** + * ap_qci(): Get AP configuration data + * + * Returns 0 on success, or -EOPNOTSUPP. + */ +static inline int ap_qci(struct ap_config_info *config) +{ + unsigned long reg0 = 4UL << 24; /* fc 4UL is QCI */ + unsigned long reg1 = -EOPNOTSUPP; + struct ap_config_info *reg2 = config; + + asm volatile( + " lgr 0,%[reg0]\n" /* QCI fc into gr0 */ + " lgr 2,%[reg2]\n" /* ptr to config into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QCI) */ + "0: la %[reg1],0\n" /* good case, QCI fc available */ + "1:\n" + EX_TABLE(0b, 1b) + : [reg1] "+&d" (reg1) + : [reg0] "d" (reg0), [reg2] "d" (reg2) + : "cc", "memory", "0", "2"); + + return reg1; +} + +/* + * struct ap_qirq_ctrl - convenient struct for easy invocation + * of the ap_aqic() function. This struct is passed as GR1 + * parameter to the PQAP(AQIC) instruction. For details please + * see the AR documentation. + */ +union ap_qirq_ctrl { + unsigned long value; + struct { + unsigned int : 8; + unsigned int zone : 8; /* zone info */ + unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */ + unsigned int : 4; + unsigned int gisc : 3; /* guest isc field */ + unsigned int : 6; + unsigned int gf : 2; /* gisa format */ + unsigned int : 1; + unsigned int gisa : 27; /* gisa origin */ + unsigned int : 1; + unsigned int isc : 3; /* irq sub class */ + }; +}; + +/** + * ap_aqic(): Control interruption for a specific AP. + * @qid: The AP queue number + * @qirqctrl: struct ap_qirq_ctrl (64 bit value) + * @pa_ind: Physical address of the notification indicator byte + * + * Returns AP queue status. 
+ */ +static inline struct ap_queue_status ap_aqic(ap_qid_t qid, + union ap_qirq_ctrl qirqctrl, + phys_addr_t pa_ind) +{ + unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */ + union ap_queue_status_reg reg1; + unsigned long reg2 = pa_ind; + + reg1.value = qirqctrl.value; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ + " lgr 2,%[reg2]\n" /* ni addr into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg1] "+&d" (reg1.value) + : [reg0] "d" (reg0), [reg2] "d" (reg2) + : "cc", "memory", "0", "1", "2"); + + return reg1.status; +} + +/* + * union ap_qact_ap_info - used together with the + * ap_aqic() function to provide a convenient way + * to handle the ap info needed by the qact function. + */ +union ap_qact_ap_info { + unsigned long val; + struct { + unsigned int : 3; + unsigned int mode : 3; + unsigned int : 26; + unsigned int cat : 8; + unsigned int : 8; + unsigned char ver[2]; + }; +}; + +/** + * ap_qact(): Query AP compatibility type. + * @qid: The AP queue number + * @apinfo: On input the info about the AP queue. On output the + * alternate AP queue info provided by the qact function + * in GR2 is stored in. + * + * Returns AP queue status. Check response_code field for failures. + */ +static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, + union ap_qact_ap_info *apinfo) +{ + unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22); + union ap_queue_status_reg reg1; + unsigned long reg2; + + reg1.value = apinfo->val; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* qact in info into gr1 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* qact out info into reg2 */ + : [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2) + : [reg0] "d" (reg0) + : "cc", "0", "1", "2"); + apinfo->val = reg2; + return reg1.status; +} + +/* + * ap_bapq(): SE bind AP queue. + * @qid: The AP queue number + * + * Returns AP queue status structure. + * + * Invoking this function in a non-SE environment + * may case a specification exception. + */ +static inline struct ap_queue_status ap_bapq(ap_qid_t qid) +{ + unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */ + union ap_queue_status_reg reg1; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg1] "=&d" (reg1.value) + : [reg0] "d" (reg0) + : "cc", "0", "1"); + + return reg1.status; +} + +/* + * ap_aapq(): SE associate AP queue. + * @qid: The AP queue number + * @sec_idx: The secret index + * + * Returns AP queue status structure. + * + * Invoking this function in a non-SE environment + * may case a specification exception. + */ +static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx) +{ + unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */ + unsigned long reg2 = sec_idx; + union ap_queue_status_reg reg1; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " lgr 2,%[reg2]\n" /* secret index into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg1] "=&d" (reg1.value) + : [reg0] "d" (reg0), [reg2] "d" (reg2) + : "cc", "0", "1", "2"); + + return reg1.status; +} + +/** + * ap_nqap(): Send message to adjunct processor queue. 
+ * @qid: The AP queue number + * @psmid: The program supplied message identifier + * @msg: The message text + * @length: The message length + * + * Returns AP queue status structure. + * Condition code 1 on NQAP can't happen because the L bit is 1. + * Condition code 2 on NQAP also means the send is incomplete, + * because a segment boundary was reached. The NQAP is repeated. + */ +static inline struct ap_queue_status ap_nqap(ap_qid_t qid, + unsigned long long psmid, + void *msg, size_t length) +{ + unsigned long reg0 = qid | 0x40000000UL; /* 0x4... is last msg part */ + union register_pair nqap_r1, nqap_r2; + union ap_queue_status_reg reg1; + + nqap_r1.even = (unsigned int)(psmid >> 32); + nqap_r1.odd = psmid & 0xffffffff; + nqap_r2.even = (unsigned long)msg; + nqap_r2.odd = (unsigned long)length; + + asm volatile ( + " lgr 0,%[reg0]\n" /* qid param in gr0 */ + "0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n" + " brc 2,0b\n" /* handle partial completion */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), + [nqap_r2] "+&d" (nqap_r2.pair) + : [nqap_r1] "d" (nqap_r1.pair) + : "cc", "memory", "0", "1"); + return reg1.status; +} + +/** + * ap_dqap(): Receive message from adjunct processor queue. + * @qid: The AP queue number + * @psmid: Pointer to program supplied message identifier + * @msg: Pointer to message buffer + * @msglen: Message buffer size + * @length: Pointer to length of actually written bytes + * @reslength: Residual length on return + * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content + * + * Returns AP queue status structure. + * Condition code 1 on DQAP means the receive has taken place + * but only partially. The response is incomplete, hence the + * DQAP is repeated. + * Condition code 2 on DQAP also means the receive is incomplete, + * this time because a segment boundary was reached. Again, the + * DQAP is repeated. + * Note that gpr2 is used by the DQAP instruction to keep track of + * any 'residual' length, in case the instruction gets interrupted. + * Hence it gets zeroed before the instruction. + * If the message does not fit into the buffer, this function will + * return with a truncated message and the reply in the firmware queue + * is not removed. This is indicated to the caller with an + * ap_queue_status response_code value of all bits on (0xFF) and (if + * the reslength ptr is given) the remaining length is stored in + * *reslength and (if the resgr0 ptr is given) the updated gr0 value + * for further processing of this msg entry is stored in *resgr0. The + * caller needs to detect this situation and should invoke ap_dqap + * with a valid resgr0 ptr and a value in there != 0 to indicate that + * *resgr0 is to be used instead of qid to further process this entry. + */ +static inline struct ap_queue_status ap_dqap(ap_qid_t qid, + unsigned long *psmid, + void *msg, size_t msglen, + size_t *length, + size_t *reslength, + unsigned long *resgr0) +{ + unsigned long reg0 = resgr0 && *resgr0 ? 
*resgr0 : qid | 0x80000000UL; + union ap_queue_status_reg reg1; + unsigned long reg2; + union register_pair rp1, rp2; + + rp1.even = 0UL; + rp1.odd = 0UL; + rp2.even = (unsigned long)msg; + rp2.odd = (unsigned long)msglen; + + asm volatile( + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lghi 2,0\n" /* 0 into gr2 (res length) */ + "0: ltgr %N[rp2],%N[rp2]\n" /* check buf len */ + " jz 2f\n" /* go out if buf len is 0 */ + "1: .insn rre,0xb2ae0000,%[rp1],%[rp2]\n" + " brc 6,0b\n" /* handle partial complete */ + "2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* gr2 (res length) into reg2 */ + : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), + [reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair), + [rp2] "+&d" (rp2.pair) + : + : "cc", "memory", "0", "1", "2"); + + if (reslength) + *reslength = reg2; + if (reg2 != 0 && rp2.odd == 0) { + /* + * Partially complete, status in gr1 is not set. + * Signal the caller that this dqap is only partially received + * with a special status response code 0xFF and *resgr0 updated + */ + reg1.status.response_code = 0xFF; + if (resgr0) + *resgr0 = reg0; + } else { + *psmid = (rp1.even << 32) + rp1.odd; + if (resgr0) + *resgr0 = 0; + } + + /* update *length with the nr of bytes stored into the msg buffer */ + if (length) + *length = msglen - rp2.odd; + + return reg1.status; +} + +/* + * Interface to tell the AP bus code that a configuration + * change has happened. The bus code should at least do + * an ap bus resource rescan. + */ +#if IS_ENABLED(CONFIG_ZCRYPT) +void ap_bus_cfg_chg(void); +#else +static inline void ap_bus_cfg_chg(void){} +#endif + +#endif /* _ASM_S390_AP_H_ */ diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h new file mode 100644 index 000000000..f2240392c --- /dev/null +++ b/arch/s390/include/asm/appldata.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2006 + * + * Author(s): Melissa Howland + */ + +#ifndef _ASM_S390_APPLDATA_H +#define _ASM_S390_APPLDATA_H + +#include +#include + +#define APPLDATA_START_INTERVAL_REC 0x80 +#define APPLDATA_STOP_REC 0x81 +#define APPLDATA_GEN_EVENT_REC 0x82 +#define APPLDATA_START_CONFIG_REC 0x83 + +/* + * Parameter list for DIAGNOSE X'DC' + */ +struct appldata_parameter_list { + u16 diag; + u8 function; + u8 parlist_length; + u32 unused01; + u16 reserved; + u16 buffer_length; + u32 unused02; + u64 product_id_addr; + u64 buffer_addr; +} __attribute__ ((packed)); + +struct appldata_product_id { + char prod_nr[7]; /* product number */ + u16 prod_fn; /* product function */ + u8 record_nr; /* record number */ + u16 version_nr; /* version */ + u16 release_nr; /* release */ + u16 mod_lvl; /* modification level */ +} __attribute__ ((packed)); + + +static inline int appldata_asm(struct appldata_parameter_list *parm_list, + struct appldata_product_id *id, + unsigned short fn, void *buffer, + unsigned short length) +{ + int ry; + + if (!MACHINE_IS_VM) + return -EOPNOTSUPP; + parm_list->diag = 0xdc; + parm_list->function = fn; + parm_list->parlist_length = sizeof(*parm_list); + parm_list->buffer_length = length; + parm_list->product_id_addr = (unsigned long) id; + parm_list->buffer_addr = virt_to_phys(buffer); + diag_stat_inc(DIAG_STAT_X0DC); + asm volatile( + " diag %1,%0,0xdc" + : "=d" (ry) + : "d" (parm_list), "m" (*parm_list), "m" (*id) + : "cc"); + return ry; +} + +#endif /* _ASM_S390_APPLDATA_H */ diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h new file mode 100644 index 000000000..159404989 --- /dev/null +++ b/arch/s390/include/asm/archrandom.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel interface for the s390 arch_random_* functions + * + * Copyright IBM Corp. 2017, 2022 + * + * Author: Harald Freudenberger + * + */ + +#ifndef _ASM_S390_ARCHRANDOM_H +#define _ASM_S390_ARCHRANDOM_H + +#include +#include +#include +#include + +DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); +extern atomic64_t s390_arch_random_counter; + +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) +{ + if (static_branch_likely(&s390_arch_random_available) && + in_task()) { + cpacf_trng(NULL, 0, (u8 *)v, max_longs * sizeof(*v)); + atomic64_add(max_longs * sizeof(*v), &s390_arch_random_counter); + return max_longs; + } + return 0; +} + +#endif /* _ASM_S390_ARCHRANDOM_H */ diff --git a/arch/s390/include/asm/asm-const.h b/arch/s390/include/asm/asm-const.h new file mode 100644 index 000000000..11f615eb0 --- /dev/null +++ b/arch/s390/include/asm/asm-const.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ASM_CONST_H +#define _ASM_S390_ASM_CONST_H + +#ifdef __ASSEMBLY__ +# define stringify_in_c(...) __VA_ARGS__ +#else +/* This version of stringify will deal with commas... */ +# define __stringify_in_c(...) #__VA_ARGS__ +# define stringify_in_c(...) 
__stringify_in_c(__VA_ARGS__) " " +#endif +#endif /* _ASM_S390_ASM_CONST_H */ diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h new file mode 100644 index 000000000..e6532477f --- /dev/null +++ b/arch/s390/include/asm/asm-extable.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_EXTABLE_H +#define __ASM_EXTABLE_H + +#include +#include +#include + +#define EX_TYPE_NONE 0 +#define EX_TYPE_FIXUP 1 +#define EX_TYPE_BPF 2 +#define EX_TYPE_UA_STORE 3 +#define EX_TYPE_UA_LOAD_MEM 4 +#define EX_TYPE_UA_LOAD_REG 5 +#define EX_TYPE_UA_LOAD_REGPAIR 6 + +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(3, 0) + +#define EX_DATA_REG_ADDR_SHIFT 4 +#define EX_DATA_REG_ADDR GENMASK(7, 4) + +#define EX_DATA_LEN_SHIFT 8 +#define EX_DATA_LEN GENMASK(11, 8) + +#define __EX_TABLE(_section, _fault, _target, _type) \ + stringify_in_c(.section _section,"a";) \ + stringify_in_c(.balign 4;) \ + stringify_in_c(.long (_fault) - .;) \ + stringify_in_c(.long (_target) - .;) \ + stringify_in_c(.short (_type);) \ + stringify_in_c(.short 0;) \ + stringify_in_c(.previous) + +#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\ + stringify_in_c(.section _section,"a";) \ + stringify_in_c(.balign 4;) \ + stringify_in_c(.long (_fault) - .;) \ + stringify_in_c(.long (_target) - .;) \ + stringify_in_c(.short (_type);) \ + stringify_in_c(.macro extable_reg regerr, regaddr;) \ + stringify_in_c(.set .Lfound, 0;) \ + stringify_in_c(.set .Lcurr, 0;) \ + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ + stringify_in_c( .ifc "\regerr", "%%r\rs";) \ + stringify_in_c( .set .Lfound, 1;) \ + stringify_in_c( .set .Lregerr, .Lcurr;) \ + stringify_in_c( .endif;) \ + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ + stringify_in_c(.endr;) \ + stringify_in_c(.ifne (.Lfound != 1);) \ + stringify_in_c( .error "extable_reg: bad register argument1";) \ + stringify_in_c(.endif;) \ + stringify_in_c(.set .Lfound, 0;) \ + stringify_in_c(.set .Lcurr, 0;) \ + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ + stringify_in_c( .ifc "\regaddr", "%%r\rs";) \ + stringify_in_c( .set .Lfound, 1;) \ + stringify_in_c( .set .Lregaddr, .Lcurr;) \ + stringify_in_c( .endif;) \ + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ + stringify_in_c(.endr;) \ + stringify_in_c(.ifne (.Lfound != 1);) \ + stringify_in_c( .error "extable_reg: bad register argument2";) \ + stringify_in_c(.endif;) \ + stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \ + .Lregaddr << EX_DATA_REG_ADDR_SHIFT | \ + _len << EX_DATA_LEN_SHIFT;) \ + stringify_in_c(.endm;) \ + stringify_in_c(extable_reg _regerr,_regaddr;) \ + stringify_in_c(.purgem extable_reg;) \ + stringify_in_c(.previous) + +#define EX_TABLE(_fault, _target) \ + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP) + +#define EX_TABLE_AMODE31(_fault, _target) \ + __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP) + +#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0) + +#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len) + +#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \ + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0) + +#define EX_TABLE_UA_LOAD_REGPAIR(_fault, _target, _regerr, _regzero) \ + __EX_TABLE_UA(__ex_table, _fault, _target, 
EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0) + +#endif /* __ASM_EXTABLE_H */ diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h new file mode 100644 index 000000000..a873e873e --- /dev/null +++ b/arch/s390/include/asm/asm-prototypes.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_PROTOTYPES_H + +#include +#include +#include +#include + +__int128_t __ashlti3(__int128_t a, int b); +__int128_t __ashrti3(__int128_t a, int b); +__int128_t __lshrti3(__int128_t a, int b); + +#endif /* _ASM_S390_PROTOTYPES_H */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h new file mode 100644 index 000000000..7138d189c --- /dev/null +++ b/arch/s390/include/asm/atomic.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2016 + * Author(s): Martin Schwidefsky , + * Denis Joseph Barrow, + * Arnd Bergmann, + */ + +#ifndef __ARCH_S390_ATOMIC__ +#define __ARCH_S390_ATOMIC__ + +#include +#include +#include +#include +#include + +static inline int arch_atomic_read(const atomic_t *v) +{ + return __atomic_read(v); +} +#define arch_atomic_read arch_atomic_read + +static inline void arch_atomic_set(atomic_t *v, int i) +{ + __atomic_set(v, i); +} +#define arch_atomic_set arch_atomic_set + +static inline int arch_atomic_add_return(int i, atomic_t *v) +{ + return __atomic_add_barrier(i, &v->counter) + i; +} +#define arch_atomic_add_return arch_atomic_add_return + +static inline int arch_atomic_fetch_add(int i, atomic_t *v) +{ + return __atomic_add_barrier(i, &v->counter); +} +#define arch_atomic_fetch_add arch_atomic_fetch_add + +static inline void arch_atomic_add(int i, atomic_t *v) +{ + __atomic_add(i, &v->counter); +} +#define arch_atomic_add arch_atomic_add + +#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v) +#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v) +#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v) + +#define ATOMIC_OPS(op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + __atomic_##op(i, &v->counter); \ +} \ +static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + return __atomic_##op##_barrier(i, &v->counter); \ +} + +ATOMIC_OPS(and) +ATOMIC_OPS(or) +ATOMIC_OPS(xor) + +#undef ATOMIC_OPS + +#define arch_atomic_and arch_atomic_and +#define arch_atomic_or arch_atomic_or +#define arch_atomic_xor arch_atomic_xor +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + return __atomic_cmpxchg(&v->counter, old, new); +} +#define arch_atomic_cmpxchg arch_atomic_cmpxchg + +#define ATOMIC64_INIT(i) { (i) } + +static inline s64 arch_atomic64_read(const atomic64_t *v) +{ + return __atomic64_read(v); +} +#define arch_atomic64_read arch_atomic64_read + +static inline void arch_atomic64_set(atomic64_t *v, s64 i) +{ + __atomic64_set(v, i); +} +#define arch_atomic64_set arch_atomic64_set + +static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v) +{ + return __atomic64_add_barrier(i, (long *)&v->counter) + i; +} +#define arch_atomic64_add_return arch_atomic64_add_return + +static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + return __atomic64_add_barrier(i, (long *)&v->counter); +} +#define arch_atomic64_fetch_add 
arch_atomic64_fetch_add + +static inline void arch_atomic64_add(s64 i, atomic64_t *v) +{ + __atomic64_add(i, (long *)&v->counter); +} +#define arch_atomic64_add arch_atomic64_add + +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + return __atomic64_cmpxchg((long *)&v->counter, old, new); +} +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg + +#define ATOMIC64_OPS(op) \ +static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \ +{ \ + __atomic64_##op(i, (long *)&v->counter); \ +} \ +static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \ +{ \ + return __atomic64_##op##_barrier(i, (long *)&v->counter); \ +} + +ATOMIC64_OPS(and) +ATOMIC64_OPS(or) +ATOMIC64_OPS(xor) + +#undef ATOMIC64_OPS + +#define arch_atomic64_and arch_atomic64_and +#define arch_atomic64_or arch_atomic64_or +#define arch_atomic64_xor arch_atomic64_xor +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor + +#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v) +#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v) +#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v) + +#endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h new file mode 100644 index 000000000..50510e08b --- /dev/null +++ b/arch/s390/include/asm/atomic_ops.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Low level function for atomic operations + * + * Copyright IBM Corp. 1999, 2016 + */ + +#ifndef __ARCH_S390_ATOMIC_OPS__ +#define __ARCH_S390_ATOMIC_OPS__ + +static inline int __atomic_read(const atomic_t *v) +{ + int c; + + asm volatile( + " l %0,%1\n" + : "=d" (c) : "R" (v->counter)); + return c; +} + +static inline void __atomic_set(atomic_t *v, int i) +{ + asm volatile( + " st %1,%0\n" + : "=R" (v->counter) : "d" (i)); +} + +static inline s64 __atomic64_read(const atomic64_t *v) +{ + s64 c; + + asm volatile( + " lg %0,%1\n" + : "=d" (c) : "RT" (v->counter)); + return c; +} + +static inline void __atomic64_set(atomic64_t *v, s64 i) +{ + asm volatile( + " stg %1,%0\n" + : "=RT" (v->counter) : "d" (i)); +} + +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES + +#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \ +static inline op_type op_name(op_type val, op_type *ptr) \ +{ \ + op_type old; \ + \ + asm volatile( \ + op_string " %[old],%[val],%[ptr]\n" \ + op_barrier \ + : [old] "=d" (old), [ptr] "+QS" (*ptr) \ + : [val] "d" (val) : "cc", "memory"); \ + return old; \ +} \ + +#define __ATOMIC_OPS(op_name, op_type, op_string) \ + __ATOMIC_OP(op_name, op_type, op_string, "\n") \ + __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") + +__ATOMIC_OPS(__atomic_add, int, "laa") +__ATOMIC_OPS(__atomic_and, int, "lan") +__ATOMIC_OPS(__atomic_or, int, "lao") +__ATOMIC_OPS(__atomic_xor, int, "lax") + +__ATOMIC_OPS(__atomic64_add, long, "laag") +__ATOMIC_OPS(__atomic64_and, long, "lang") +__ATOMIC_OPS(__atomic64_or, long, "laog") +__ATOMIC_OPS(__atomic64_xor, long, "laxg") + +#undef __ATOMIC_OPS +#undef __ATOMIC_OP + +#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \ +static __always_inline void op_name(op_type val, op_type *ptr) \ +{ \ + asm volatile( \ + op_string " %[ptr],%[val]\n" \ + op_barrier \ + : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", 
"memory");\ +} + +#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \ + __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \ + __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") + +__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi") +__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi") + +#undef __ATOMIC_CONST_OPS +#undef __ATOMIC_CONST_OP + +#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define __ATOMIC_OP(op_name, op_string) \ +static inline int op_name(int val, int *ptr) \ +{ \ + int old, new; \ + \ + asm volatile( \ + "0: lr %[new],%[old]\n" \ + op_string " %[new],%[val]\n" \ + " cs %[old],%[new],%[ptr]\n" \ + " jl 0b" \ + : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\ + : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \ + return old; \ +} + +#define __ATOMIC_OPS(op_name, op_string) \ + __ATOMIC_OP(op_name, op_string) \ + __ATOMIC_OP(op_name##_barrier, op_string) + +__ATOMIC_OPS(__atomic_add, "ar") +__ATOMIC_OPS(__atomic_and, "nr") +__ATOMIC_OPS(__atomic_or, "or") +__ATOMIC_OPS(__atomic_xor, "xr") + +#undef __ATOMIC_OPS + +#define __ATOMIC64_OP(op_name, op_string) \ +static inline long op_name(long val, long *ptr) \ +{ \ + long old, new; \ + \ + asm volatile( \ + "0: lgr %[new],%[old]\n" \ + op_string " %[new],%[val]\n" \ + " csg %[old],%[new],%[ptr]\n" \ + " jl 0b" \ + : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\ + : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \ + return old; \ +} + +#define __ATOMIC64_OPS(op_name, op_string) \ + __ATOMIC64_OP(op_name, op_string) \ + __ATOMIC64_OP(op_name##_barrier, op_string) + +__ATOMIC64_OPS(__atomic64_add, "agr") +__ATOMIC64_OPS(__atomic64_and, "ngr") +__ATOMIC64_OPS(__atomic64_or, "ogr") +__ATOMIC64_OPS(__atomic64_xor, "xgr") + +#undef __ATOMIC64_OPS + +#define __atomic_add_const(val, ptr) __atomic_add(val, ptr) +#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr) +#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr) +#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr) + +#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +static inline int __atomic_cmpxchg(int *ptr, int old, int new) +{ + asm volatile( + " cs %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+Q" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old; +} + +static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new) +{ + int old_expected = old; + + asm volatile( + " cs %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+Q" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old == old_expected; +} + +static inline long __atomic64_cmpxchg(long *ptr, long old, long new) +{ + asm volatile( + " csg %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old; +} + +static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new) +{ + long old_expected = old; + + asm volatile( + " csg %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old == old_expected; +} + +#endif /* __ARCH_S390_ATOMIC_OPS__ */ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h new file mode 100644 index 000000000..82de2a7c4 --- /dev/null +++ b/arch/s390/include/asm/barrier.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * Force strict CPU ordering. 
+ * And yes, this is required on UP too when we're talking + * to devices. + */ + +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +/* Fast-BCR without checkpoint synchronization */ +#define __ASM_BCR_SERIALIZE "bcr 14,0\n" +#else +#define __ASM_BCR_SERIALIZE "bcr 15,0\n" +#endif + +static __always_inline void bcr_serialize(void) +{ + asm volatile(__ASM_BCR_SERIALIZE : : : "memory"); +} + +#define __mb() bcr_serialize() +#define __rmb() barrier() +#define __wmb() barrier() +#define __dma_rmb() __mb() +#define __dma_wmb() __mb() +#define __smp_mb() __mb() +#define __smp_rmb() __rmb() +#define __smp_wmb() __wmb() + +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +/** + * array_index_mask_nospec - generate a mask for array_idx() that is + * ~0UL when the bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + if (__builtin_constant_p(size) && size > 0) { + asm(" clgr %2,%1\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size-1), "d" (index) :"cc"); + return mask; + } + asm(" clgr %1,%2\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size), "d" (index) :"cc"); + return ~mask; +} + +#include + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h new file mode 100644 index 000000000..2de74fcd0 --- /dev/null +++ b/arch/s390/include/asm/bitops.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999,2013 + * + * Author(s): Martin Schwidefsky , + * + * The description below was taken in large parts from the powerpc + * bitops header file: + * Within a word, bits are numbered LSB first. Lots of places make + * this assumption by directly testing bits with (val & (1<<nr)). + * This can cause confusion for large (> 1 word) bitmaps on a + * big-endian system because, unlike little endian, the number of each + * bit depends on the word size. + * + * The bitop functions are defined to work on unsigned longs, so the bits + * end up numbered: + * |63..............0|127............64|191...........128|255...........192| + * + * We also have special functions which work with an MSB0 encoding. + * The bits are numbered: + * |0..............63|64............127|128...........191|192...........255| + * + * The main difference is that bit 0-63 in the bit number field needs to be + * reversed compared to the LSB0 encoded bit fields. This can be achieved by + * XOR with 0x3f.
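+ * For example, within the first word MSB0 bit 0 is LSB0 bit 0 ^ 0x3f = 63,
+ * and MSB0 bit 63 is LSB0 bit 63 ^ 0x3f = 0.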
+ * + */ + +#ifndef _S390_BITOPS_H +#define _S390_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include +#include +#include +#include + +#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) + +static inline unsigned long * +__bitops_word(unsigned long nr, const volatile unsigned long *ptr) +{ + unsigned long addr; + + addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3); + return (unsigned long *)addr; +} + +static inline unsigned long __bitops_mask(unsigned long nr) +{ + return 1UL << (nr & (BITS_PER_LONG - 1)); +} + +static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + + __atomic64_or(mask, (long *)addr); +} + +static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + + __atomic64_and(~mask, (long *)addr); +} + +static __always_inline void arch_change_bit(unsigned long nr, + volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + + __atomic64_xor(mask, (long *)addr); +} + +static inline bool arch_test_and_set_bit(unsigned long nr, + volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = __atomic64_or_barrier(mask, (long *)addr); + return old & mask; +} + +static inline bool arch_test_and_clear_bit(unsigned long nr, + volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = __atomic64_and_barrier(~mask, (long *)addr); + return old & mask; +} + +static inline bool arch_test_and_change_bit(unsigned long nr, + volatile unsigned long *ptr) +{ + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = __atomic64_xor_barrier(mask, (long *)addr); + return old & mask; +} + +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + + *p |= mask; +} + +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + + *p &= ~mask; +} + +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + + *p ^= mask; +} + +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = *p; + *p |= mask; + return old & mask; +} + +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = *p; + *p &= ~mask; + return old & mask; +} + +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *p = __bitops_word(nr, addr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; + + old = *p; + 
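+	/* plain (non-atomic) read-modify-write; callers must serialize access */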
*p ^= mask; + return old & mask; +} + +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire + +static inline bool arch_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *ptr) +{ + if (arch_test_bit(nr, ptr)) + return true; + return arch_test_and_set_bit(nr, ptr); +} + +static inline void arch_clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) +{ + smp_mb__before_atomic(); + arch_clear_bit(nr, ptr); +} + +static inline void arch___clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) +{ + smp_mb(); + arch___clear_bit(nr, ptr); +} + +#include +#include +#include + +/* + * Functions which use MSB0 bit numbering. + * The bits are numbered: + * |0..............63|64............127|128...........191|192...........255| + */ +unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); +unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, + unsigned long offset); + +#define for_each_set_bit_inv(bit, addr, size) \ + for ((bit) = find_first_bit_inv((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit_inv((addr), (size), (bit) + 1)) + +static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr) +{ + return set_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) +{ + return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +static inline bool test_and_clear_bit_inv(unsigned long nr, + volatile unsigned long *ptr) +{ + return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr) +{ + return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) +{ + return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +static inline bool test_bit_inv(unsigned long nr, + const volatile unsigned long *ptr) +{ + return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + +/** + * __flogr - find leftmost one + * @word - The word to search + * + * Returns the bit number of the most significant bit set, + * where the most significant bit has bit number 0. + * If no bit is set this function returns 64. + */ +static inline unsigned char __flogr(unsigned long word) +{ + if (__builtin_constant_p(word)) { + unsigned long bit = 0; + + if (!word) + return 64; + if (!(word & 0xffffffff00000000UL)) { + word <<= 32; + bit += 32; + } + if (!(word & 0xffff000000000000UL)) { + word <<= 16; + bit += 16; + } + if (!(word & 0xff00000000000000UL)) { + word <<= 8; + bit += 8; + } + if (!(word & 0xf000000000000000UL)) { + word <<= 4; + bit += 4; + } + if (!(word & 0xc000000000000000UL)) { + word <<= 2; + bit += 2; + } + if (!(word & 0x8000000000000000UL)) { + word <<= 1; + bit += 1; + } + return bit; + } else { + union register_pair rp; + + rp.even = word; + asm volatile( + " flogr %[rp],%[rp]\n" + : [rp] "+d" (rp.pair) : : "cc"); + return rp.even; + } +} + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __flogr(-word & word) ^ (BITS_PER_LONG - 1); +} + +/** + * ffs - find first bit set + * @word: the word to search + * + * This is defined the same way as the libc and + * compiler builtin ffs routines (man ffs). 
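+ *
+ * For example, ffs(0) = 0, ffs(1) = 1 and ffs(0x80000000) = 32.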
+ */ +static inline int ffs(int word) +{ + unsigned long mask = 2 * BITS_PER_LONG - 1; + unsigned int val = (unsigned int)word; + + return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask; +} + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +static inline unsigned long __fls(unsigned long word) +{ + return __flogr(word) ^ (BITS_PER_LONG - 1); +} + +/** + * fls64 - find last set bit in a 64-bit word + * @word: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. + */ +static inline int fls64(unsigned long word) +{ + unsigned long mask = 2 * BITS_PER_LONG - 1; + + return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask; +} + +/** + * fls - find last (most-significant) bit set + * @word: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static inline int fls(unsigned int word) +{ + return fls64(word); +} + +#include +#include +#include +#include +#include + +#endif /* _S390_BITOPS_H */ diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h new file mode 100644 index 000000000..f7eed27b3 --- /dev/null +++ b/arch/s390/include/asm/boot_data.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_BOOT_DATA_H + +#include +#include + +extern char early_command_line[COMMAND_LINE_SIZE]; +extern struct ipl_parameter_block ipl_block; +extern int ipl_block_valid; +extern int ipl_secure_flag; + +extern unsigned long ipl_cert_list_addr; +extern unsigned long ipl_cert_list_size; + +extern unsigned long early_ipl_comp_list_addr; +extern unsigned long early_ipl_comp_list_size; + +#endif /* _ASM_S390_BOOT_DATA_H */ diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h new file mode 100644 index 000000000..aebe1e22c --- /dev/null +++ b/arch/s390/include/asm/bug.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_BUG_H +#define _ASM_S390_BUG_H + +#include + +#ifdef CONFIG_BUG + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define __EMIT_BUG(x) do { \ + asm_inline volatile( \ + "0: mc 0,0\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "1: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"awM\",@progbits,%2\n" \ + "2: .long 0b-.\n" \ + " .long 1b-.\n" \ + " .short %0,%1\n" \ + " .org 2b+%2\n" \ + ".previous\n" \ + : : "i" (__LINE__), \ + "i" (x), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#else /* CONFIG_DEBUG_BUGVERBOSE */ + +#define __EMIT_BUG(x) do { \ + asm_inline volatile( \ + "0: mc 0,0\n" \ + ".section __bug_table,\"awM\",@progbits,%1\n" \ + "1: .long 0b-.\n" \ + " .short %0\n" \ + " .org 1b+%1\n" \ + ".previous\n" \ + : : "i" (x), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#define BUG() do { \ + __EMIT_BUG(0); \ + unreachable(); \ +} while (0) + +#define __WARN_FLAGS(flags) do { \ + __EMIT_BUG(BUGFLAG_WARNING|(flags)); \ +} while (0) + +#define WARN_ON(x) ({ \ + int __ret_warn_on = !!(x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ + __WARN(); \ + } else { \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + } \ + 
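+	/* the statement expression yields the (unlikely) tested condition */ \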
unlikely(__ret_warn_on); \ +}) + +#define HAVE_ARCH_BUG +#define HAVE_ARCH_WARN_ON +#endif /* CONFIG_BUG */ + +#include + +#endif /* _ASM_S390_BUG_H */ diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h new file mode 100644 index 000000000..00128174c --- /dev/null +++ b/arch/s390/include/asm/cache.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * + * Derived from "include/asm-i386/cache.h" + * Copyright (C) 1992, Linus Torvalds + */ + +#ifndef __ARCH_S390_CACHE_H +#define __ARCH_S390_CACHE_H + +#define L1_CACHE_BYTES 256 +#define L1_CACHE_SHIFT 8 +#define NET_SKB_PAD 32 + +#define __read_mostly __section(".data..read_mostly") + +#endif diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h new file mode 100644 index 000000000..91d261751 --- /dev/null +++ b/arch/s390/include/asm/ccwdev.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2002, 2009 + * + * Author(s): Arnd Bergmann + * + * Interface for CCW device drivers + */ +#ifndef _S390_CCWDEV_H_ +#define _S390_CCWDEV_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* structs from asm/cio.h */ +struct irb; +struct ccw1; +struct ccw_dev_id; + +/* simplified initializers for struct ccw_device: + * CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one + * entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */ +#define CCW_DEVICE(cu, cum) \ + .cu_type=(cu), .cu_model=(cum), \ + .match_flags=(CCW_DEVICE_ID_MATCH_CU_TYPE \ + | (cum ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0)) + +#define CCW_DEVICE_DEVTYPE(cu, cum, dev, devm) \ + .cu_type=(cu), .cu_model=(cum), .dev_type=(dev), .dev_model=(devm),\ + .match_flags=CCW_DEVICE_ID_MATCH_CU_TYPE \ + | ((cum) ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0) \ + | CCW_DEVICE_ID_MATCH_DEVICE_TYPE \ + | ((devm) ? CCW_DEVICE_ID_MATCH_DEVICE_MODEL : 0) + +/* scan through an array of device ids and return the first + * entry that matches the device. + * + * the array must end with an entry containing zero match_flags + */ +static inline const struct ccw_device_id * +ccw_device_id_match(const struct ccw_device_id *array, + const struct ccw_device_id *match) +{ + const struct ccw_device_id *id = array; + + for (id = array; id->match_flags; id++) { + if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_TYPE) + && (id->cu_type != match->cu_type)) + continue; + + if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_MODEL) + && (id->cu_model != match->cu_model)) + continue; + + if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_TYPE) + && (id->dev_type != match->dev_type)) + continue; + + if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_MODEL) + && (id->dev_model != match->dev_model)) + continue; + + return id; + } + + return NULL; +} + +/** + * struct ccw_device - channel attached device + * @ccwlock: pointer to device lock + * @id: id of this device + * @drv: ccw driver for this device + * @dev: embedded device structure + * @online: online status of device + * @handler: interrupt handler + * + * @handler is a member of the device rather than the driver since a driver + * can have different interrupt handlers for different ccw devices + * (multi-subchannel drivers). 
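+ *
+ * Example (my_irq_handler is a hypothetical name): a driver assigns
+ * cdev->handler = my_irq_handler; in its probe function, before the
+ * device is set online.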
+ */ +struct ccw_device { + spinlock_t *ccwlock; +/* private: */ + struct ccw_device_private *private; /* cio private information */ + struct mutex reg_mutex; +/* public: */ + struct ccw_device_id id; + struct ccw_driver *drv; + struct device dev; + int online; + void (*handler) (struct ccw_device *, unsigned long, struct irb *); +}; + +/* + * Possible events used by the path_event notifier. + */ +#define PE_NONE 0x0 +#define PE_PATH_GONE 0x1 /* A path is no longer available. */ +#define PE_PATH_AVAILABLE 0x2 /* A path has become available and + was successfully verified. */ +#define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had + to be established again. */ +#define PE_PATH_FCES_EVENT 0x8 /* The FCES Status of a path has + * changed. */ + +/* + * Possible CIO actions triggered by the unit check handler. + */ +enum uc_todo { + UC_TODO_RETRY, + UC_TODO_RETRY_ON_NEW_PATH, + UC_TODO_STOP +}; + +/** + * struct ccw_driver - device driver for channel attached devices + * @ids: ids supported by this driver + * @probe: function called on probe + * @remove: function called on remove + * @set_online: called when setting device online + * @set_offline: called when setting device offline + * @notify: notify driver of device state changes + * @path_event: notify driver of channel path events + * @shutdown: called at device shutdown + * @uc_handler: callback for unit check handler + * @driver: embedded device driver structure + * @int_class: interruption class to use for accounting interrupts + */ +struct ccw_driver { + struct ccw_device_id *ids; + int (*probe) (struct ccw_device *); + void (*remove) (struct ccw_device *); + int (*set_online) (struct ccw_device *); + int (*set_offline) (struct ccw_device *); + int (*notify) (struct ccw_device *, int); + void (*path_event) (struct ccw_device *, int *); + void (*shutdown) (struct ccw_device *); + enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *); + struct device_driver driver; + enum interruption_class int_class; +}; + +extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, + const char *bus_id); + +/* device drivers call these during module load and unload. + * When a driver is registered, its probe method is called + * when new devices for its type pop up */ +extern int ccw_driver_register (struct ccw_driver *driver); +extern void ccw_driver_unregister (struct ccw_driver *driver); +extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long); +extern int ccw_device_set_options(struct ccw_device *, unsigned long); +extern void ccw_device_clear_options(struct ccw_device *, unsigned long); +int ccw_device_is_pathgroup(struct ccw_device *cdev); +int ccw_device_is_multipath(struct ccw_device *cdev); + +/* Allow for i/o completion notification after primary interrupt status. */ +#define CCWDEV_EARLY_NOTIFICATION 0x0001 +/* Report all interrupt conditions. */ +#define CCWDEV_REPORT_ALL 0x0002 +/* Try to perform path grouping. */ +#define CCWDEV_DO_PATHGROUP 0x0004 +/* Allow forced onlining of boxed devices. */ +#define CCWDEV_ALLOW_FORCE 0x0008 +/* Try to use multipath mode.
*/ +#define CCWDEV_DO_MULTIPATH 0x0010 + +extern int ccw_device_start(struct ccw_device *, struct ccw1 *, + unsigned long, __u8, unsigned long); +extern int ccw_device_start_timeout(struct ccw_device *, struct ccw1 *, + unsigned long, __u8, unsigned long, int); +extern int ccw_device_start_key(struct ccw_device *, struct ccw1 *, + unsigned long, __u8, __u8, unsigned long); +extern int ccw_device_start_timeout_key(struct ccw_device *, struct ccw1 *, + unsigned long, __u8, __u8, + unsigned long, int); + + +extern int ccw_device_resume(struct ccw_device *); +extern int ccw_device_halt(struct ccw_device *, unsigned long); +extern int ccw_device_clear(struct ccw_device *, unsigned long); +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key); +int ccw_device_tm_start_timeout_key(struct ccw_device *, struct tcw *, + unsigned long, u8, u8, int); +int ccw_device_tm_start(struct ccw_device *, struct tcw *, + unsigned long, u8); +int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *, + unsigned long, u8, int); +int ccw_device_tm_intrg(struct ccw_device *cdev); + +int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask); + +extern int ccw_device_set_online(struct ccw_device *cdev); +extern int ccw_device_set_offline(struct ccw_device *cdev); + + +extern struct ciw *ccw_device_get_ciw(struct ccw_device *, __u32 cmd); +extern __u8 ccw_device_get_path_mask(struct ccw_device *); +extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); + +#define get_ccwdev_lock(x) (x)->ccwlock + +#define to_ccwdev(n) container_of(n, struct ccw_device, dev) +#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) + +extern struct ccw_device *ccw_device_create_console(struct ccw_driver *); +extern void ccw_device_destroy_console(struct ccw_device *); +extern int ccw_device_enable_console(struct ccw_device *); +extern void ccw_device_wait_idle(struct ccw_device *); + +extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size); +extern void ccw_device_dma_free(struct ccw_device *cdev, + void *cpu_addr, size_t size); + +int ccw_device_siosl(struct ccw_device *); + +extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *); + +struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int); +u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx); +int ccw_device_pnso(struct ccw_device *cdev, + struct chsc_pnso_area *pnso_area, u8 oc, + struct chsc_pnso_resume_token resume_token, int cnc); +int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid); +int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid); +int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid); +int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid); +#endif /* _S390_CCWDEV_H_ */ diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h new file mode 100644 index 000000000..11d2fb3de --- /dev/null +++ b/arch/s390/include/asm/ccwgroup.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_CCWGROUP_H +#define S390_CCWGROUP_H + +struct ccw_device; +struct ccw_driver; + +/** + * struct ccwgroup_device - ccw group device + * @state: online/offline state + * @count: number of attached slave devices + * @dev: embedded device structure + * @cdev: variable number of slave devices, allocated as needed + * @ungroup_work: used to ungroup the ccwgroup
device + */ +struct ccwgroup_device { + enum { + CCWGROUP_OFFLINE, + CCWGROUP_ONLINE, + } state; +/* private: */ + atomic_t onoff; + struct mutex reg_mutex; +/* public: */ + unsigned int count; + struct device dev; + struct work_struct ungroup_work; + struct ccw_device *cdev[]; +}; + +/** + * struct ccwgroup_driver - driver for ccw group devices + * @setup: function called during device creation to setup the device + * @remove: function called on remove + * @set_online: function called when device is set online + * @set_offline: function called when device is set offline + * @shutdown: function called when device is shut down + * @driver: embedded driver structure + * @ccw_driver: supported ccw_driver (optional) + */ +struct ccwgroup_driver { + int (*setup) (struct ccwgroup_device *); + void (*remove) (struct ccwgroup_device *); + int (*set_online) (struct ccwgroup_device *); + int (*set_offline) (struct ccwgroup_device *); + void (*shutdown)(struct ccwgroup_device *); + + struct device_driver driver; + struct ccw_driver *ccw_driver; +}; + +extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver); +extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver); +int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv, + int num_devices, const char *buf); + +extern int ccwgroup_set_online(struct ccwgroup_device *gdev); +int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv); + +extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev); +extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev); + +#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev) +#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver) + +#if IS_ENABLED(CONFIG_CCWGROUP) +bool dev_is_ccwgroup(struct device *dev); +#else /* CONFIG_CCWGROUP */ +static inline bool dev_is_ccwgroup(struct device *dev) +{ + return false; +} +#endif /* CONFIG_CCWGROUP */ + +#endif diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h new file mode 100644 index 000000000..69837eec2 --- /dev/null +++ b/arch/s390/include/asm/checksum.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 fast network checksum routines + * + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Ulrich Hild (first version) + * Martin Schwidefsky (heavily optimized CKSM version) + * D.J. Barrow (third attempt) + */ + +#ifndef _S390_CHECKSUM_H +#define _S390_CHECKSUM_H + +#include +#include + +/* + * Computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit). + * + * Returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic. + * + * This function must be called with even lengths, except + * for the last fragment, which may be odd. + * + * It's best to have buff aligned on a 32-bit boundary. + */ +static inline __wsum csum_partial(const void *buff, int len, __wsum sum) +{ + union register_pair rp = { + .even = (unsigned long) buff, + .odd = (unsigned long) len, + }; + + kasan_check_read(buff, len); + asm volatile( + "0: cksm %[sum],%[rp]\n" + " jo 0b\n" + : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory"); + return sum; +} + +/* + * Fold a partial checksum without adding pseudo headers. 
+ */ +static inline __sum16 csum_fold(__wsum sum) +{ + u32 csum = (__force u32) sum; + + csum += (csum >> 16) | (csum << 16); + csum >>= 16; + return (__force __sum16) ~csum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksums on 4 octet boundaries. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + __u64 csum = 0; + __u32 *ptr = (u32 *)iph; + + csum += *ptr++; + csum += *ptr++; + csum += *ptr++; + csum += *ptr++; + ihl -= 4; + while (ihl--) + csum += *ptr++; + csum += (csum >> 32) | (csum << 32); + return csum_fold((__force __wsum)(csum >> 32)); +} + +/* + * Computes the checksum of the TCP/UDP pseudo-header. + * Returns a 32-bit checksum. + */ +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + __u64 csum = (__force __u64)sum; + + csum += (__force __u32)saddr; + csum += (__force __u32)daddr; + csum += len; + csum += proto; + csum += (csum >> 32) | (csum << 32); + return (__force __wsum)(csum >> 32); +} + +/* + * Computes the checksum of the TCP/UDP pseudo-header. + * Returns a 16-bit checksum, already complemented. + */ +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * Used for miscellaneous IP-like checksums, mainly icmp. + */ +static inline __sum16 ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + __u64 sum = (__force __u64)csum; + + sum += (__force __u32)saddr->s6_addr32[0]; + sum += (__force __u32)saddr->s6_addr32[1]; + sum += (__force __u32)saddr->s6_addr32[2]; + sum += (__force __u32)saddr->s6_addr32[3]; + sum += (__force __u32)daddr->s6_addr32[0]; + sum += (__force __u32)daddr->s6_addr32[1]; + sum += (__force __u32)daddr->s6_addr32[2]; + sum += (__force __u32)daddr->s6_addr32[3]; + sum += len; + sum += proto; + sum += (sum >> 32) | (sum << 32); + return csum_fold((__force __wsum)(sum >> 32)); +} + +#endif /* _S390_CHECKSUM_H */ diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h new file mode 100644 index 000000000..20e0d22f2 --- /dev/null +++ b/arch/s390/include/asm/chpid.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007, 2012 + * Author(s): Peter Oberparleiter + */ +#ifndef _ASM_S390_CHPID_H +#define _ASM_S390_CHPID_H + +#include +#include + +struct channel_path_desc_fmt0 { + u8 flags; + u8 lsn; + u8 desc; + u8 chpid; + u8 swla; + u8 zeroes; + u8 chla; + u8 chpp; +} __packed; + +static inline void chp_id_init(struct chp_id *chpid) +{ + memset(chpid, 0, sizeof(struct chp_id)); +} + +static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b) +{ + return (a->id == b->id) && (a->cssid == b->cssid); +} + +static inline void chp_id_next(struct chp_id *chpid) +{ + if (chpid->id < __MAX_CHPID) + chpid->id++; + else { + chpid->id = 0; + chpid->cssid++; + } +} + +static inline int chp_id_is_valid(struct chp_id *chpid) +{ + return (chpid->cssid <= __MAX_CSSID); +} + + +#define chp_id_for_each(c) \ + for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) +#endif /* _ASM_S390_CHPID_H */ diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h new file mode 100644 index 000000000..bb48ea380 --- /dev/null +++ b/arch/s390/include/asm/chsc.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2020 + * + * Author(s): Alexandra Winter + * + * Interface for Channel Subsystem Call + */ +#ifndef _ASM_S390_CHSC_H +#define _ASM_S390_CHSC_H + +#include + +/** + * Operation codes for CHSC PNSO: + * PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport + * PNSO_OC_NET_ADDR_INFO - all addresses + */ +#define PNSO_OC_NET_BRIDGE_INFO 0 +#define PNSO_OC_NET_ADDR_INFO 3 +/** + * struct chsc_pnso_naid_l2 - network address information descriptor + * @nit: Network interface token + * @addr_lnid: network address and logical network id (VLAN ID) + */ +struct chsc_pnso_naid_l2 { + u64 nit; + struct { u8 mac[6]; u16 lnid; } addr_lnid; +} __packed; + +struct chsc_pnso_resume_token { + u64 t1; + u64 t2; +} __packed; + +struct chsc_pnso_naihdr { + struct chsc_pnso_resume_token resume_token; + u32:32; + u32 instance; + u32:24; + u8 naids; + u32 reserved[3]; +} __packed; + +struct chsc_pnso_area { + struct chsc_header request; + u8:2; + u8 m:1; + u8:5; + u8:2; + u8 ssid:2; + u8 fmt:4; + u16 sch; + u8:8; + u8 cssid; + u16:16; + u8 oc; + u32:24; + struct chsc_pnso_resume_token resume_token; + u32 n:1; + u32:31; + u32 reserved[3]; + struct chsc_header response; + u32:32; + struct chsc_pnso_naihdr naihdr; + struct chsc_pnso_naid_l2 entries[]; +} __packed __aligned(PAGE_SIZE); + +#endif /* _ASM_S390_CHSC_H */ diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h new file mode 100644 index 000000000..1c4f585dd --- /dev/null +++ b/arch/s390/include/asm/cio.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common interface for I/O on S/390 + */ +#ifndef _ASM_S390_CIO_H_ +#define _ASM_S390_CIO_H_ + +#include +#include +#include +#include + +#define LPM_ANYPATH 0xff +#define __MAX_CSSID 0 +#define __MAX_SUBCHANNEL 65535 +#define __MAX_SSID 3 + +#include + +/** + * struct ccw1 - channel command word + * @cmd_code: command code + * @flags: flags, like IDA addressing, etc. + * @count: byte count + * @cda: data address + * + * The ccw is the basic structure to build channel programs that perform + * operations with the device or the control unit. Only Format-1 channel + * command words are supported. 
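+ *
+ * A no-op command, for illustration only:
+ * struct ccw1 nop = { .cmd_code = CCW_CMD_NOOP, .flags = 0, .count = 0, .cda = 0 };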
+ */ +struct ccw1 { + __u8 cmd_code; + __u8 flags; + __u16 count; + __u32 cda; +} __attribute__ ((packed,aligned(8))); + +/** + * struct ccw0 - channel command word + * @cmd_code: command code + * @cda: data address + * @flags: flags, like IDA addressing, etc. + * @reserved: will be ignored + * @count: byte count + * + * The format-0 ccw structure. + */ +struct ccw0 { + __u8 cmd_code; + __u32 cda : 24; + __u8 flags; + __u8 reserved; + __u16 count; +} __packed __aligned(8); + +#define CCW_FLAG_DC 0x80 +#define CCW_FLAG_CC 0x40 +#define CCW_FLAG_SLI 0x20 +#define CCW_FLAG_SKIP 0x10 +#define CCW_FLAG_PCI 0x08 +#define CCW_FLAG_IDA 0x04 +#define CCW_FLAG_SUSPEND 0x02 + +#define CCW_CMD_READ_IPL 0x02 +#define CCW_CMD_NOOP 0x03 +#define CCW_CMD_BASIC_SENSE 0x04 +#define CCW_CMD_TIC 0x08 +#define CCW_CMD_STLCK 0x14 +#define CCW_CMD_SENSE_PGID 0x34 +#define CCW_CMD_SUSPEND_RECONN 0x5B +#define CCW_CMD_RDC 0x64 +#define CCW_CMD_RELEASE 0x94 +#define CCW_CMD_SET_PGID 0xAF +#define CCW_CMD_SENSE_ID 0xE4 +#define CCW_CMD_DCTL 0xF3 + +#define SENSE_MAX_COUNT 0x20 + +/** + * struct erw - extended report word + * @res0: reserved + * @auth: authorization check + * @pvrf: path-verification-required flag + * @cpt: channel-path timeout + * @fsavf: failing storage address validity flag + * @cons: concurrent sense + * @scavf: secondary ccw address validity flag + * @fsaf: failing storage address format + * @scnt: sense count, if @cons == %1 + * @res16: reserved + */ +struct erw { + __u32 res0 : 3; + __u32 auth : 1; + __u32 pvrf : 1; + __u32 cpt : 1; + __u32 fsavf : 1; + __u32 cons : 1; + __u32 scavf : 1; + __u32 fsaf : 1; + __u32 scnt : 6; + __u32 res16 : 16; +} __attribute__ ((packed)); + +/** + * struct erw_eadm - EADM Subchannel extended report word + * @b: aob error + * @r: arsb error + */ +struct erw_eadm { + __u32 : 16; + __u32 b : 1; + __u32 r : 1; + __u32 : 14; +} __packed; + +/** + * struct sublog - subchannel logout area + * @res0: reserved + * @esf: extended status flags + * @lpum: last path used mask + * @arep: ancillary report + * @fvf: field-validity flags + * @sacc: storage access code + * @termc: termination code + * @devsc: device-status check + * @serr: secondary error + * @ioerr: i/o-error alert + * @seqc: sequence code + */ +struct sublog { + __u32 res0 : 1; + __u32 esf : 7; + __u32 lpum : 8; + __u32 arep : 1; + __u32 fvf : 5; + __u32 sacc : 2; + __u32 termc : 2; + __u32 devsc : 1; + __u32 serr : 1; + __u32 ioerr : 1; + __u32 seqc : 3; +} __attribute__ ((packed)); + +/** + * struct esw0 - Format 0 Extended Status Word (ESW) + * @sublog: subchannel logout + * @erw: extended report word + * @faddr: failing storage address + * @saddr: secondary ccw address + */ +struct esw0 { + struct sublog sublog; + struct erw erw; + __u32 faddr[2]; + __u32 saddr; +} __attribute__ ((packed)); + +/** + * struct esw1 - Format 1 Extended Status Word (ESW) + * @zero0: reserved zeros + * @lpum: last path used mask + * @zero16: reserved zeros + * @erw: extended report word + * @zeros: three fullwords of zeros + */ +struct esw1 { + __u8 zero0; + __u8 lpum; + __u16 zero16; + struct erw erw; + __u32 zeros[3]; +} __attribute__ ((packed)); + +/** + * struct esw2 - Format 2 Extended Status Word (ESW) + * @zero0: reserved zeros + * @lpum: last path used mask + * @dcti: device-connect-time interval + * @erw: extended report word + * @zeros: three fullwords of zeros + */ +struct esw2 { + __u8 zero0; + __u8 lpum; + __u16 dcti; + struct erw erw; + __u32 zeros[3]; +} __attribute__ ((packed)); + +/** + * struct esw3 - Format 3 
Extended Status Word (ESW) + * @zero0: reserved zeros + * @lpum: last path used mask + * @res: reserved + * @erw: extended report word + * @zeros: three fullwords of zeros + */ +struct esw3 { + __u8 zero0; + __u8 lpum; + __u16 res; + struct erw erw; + __u32 zeros[3]; +} __attribute__ ((packed)); + +/** + * struct esw_eadm - EADM Subchannel Extended Status Word (ESW) + * @sublog: subchannel logout + * @erw: extended report word + */ +struct esw_eadm { + __u32 sublog; + struct erw_eadm erw; + __u32 : 32; + __u32 : 32; + __u32 : 32; +} __packed; + +/** + * struct irb - interruption response block + * @scsw: subchannel status word + * @esw: extended status word + * @ecw: extended control word + * + * The irb that is handed to the device driver when an interrupt occurs. For + * solicited interrupts, the common I/O layer already performs checks whether + * a field is valid; a field not being valid is always passed as %0. + * If a unit check occurred, @ecw may contain sense data; this is retrieved + * by the common I/O layer itself if the device doesn't support concurrent + * sense (so that the device driver never needs to perform basic sense itself). + * For unsolicited interrupts, the irb is passed as-is (except for sense data, + * if applicable). + */ +struct irb { + union scsw scsw; + union { + struct esw0 esw0; + struct esw1 esw1; + struct esw2 esw2; + struct esw3 esw3; + struct esw_eadm eadm; + } esw; + __u8 ecw[32]; +} __attribute__ ((packed,aligned(4))); + +/** + * struct ciw - command information word (CIW) layout + * @et: entry type + * @reserved: reserved bits + * @ct: command type + * @cmd: command code + * @count: command count + */ +struct ciw { + __u32 et : 2; + __u32 reserved : 2; + __u32 ct : 4; + __u32 cmd : 8; + __u32 count : 16; +} __attribute__ ((packed)); + +#define CIW_TYPE_RCD 0x0 /* read configuration data */ +#define CIW_TYPE_SII 0x1 /* set interface identifier */ +#define CIW_TYPE_RNI 0x2 /* read node identifier */ + +/* + * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands" + */ + +#define ND_VALIDITY_VALID 0 +#define ND_VALIDITY_OUTDATED 1 +#define ND_VALIDITY_INVALID 2 + +struct node_descriptor { + /* Flags. */ + union { + struct { + u32 validity:3; + u32 reserved:5; + } __packed; + u8 byte0; + } __packed; + + /* Node parameters. */ + u32 params:24; + + /* Node ID. */ + char type[6]; + char model[3]; + char manufacturer[3]; + char plant[2]; + char seq[12]; + u16 tag; +} __packed; + +/* + * Flags used as input parameters for do_IO() + */ +#define DOIO_ALLOW_SUSPEND 0x0001 /* allow for channel prog. suspend */ +#define DOIO_DENY_PREFETCH 0x0002 /* don't allow for CCW prefetch */ +#define DOIO_SUPPRESS_INTER 0x0004 /* suppress intermediate inter. */ + /* ... for suspended CCWs */ +/* Device or subchannel gone. */ +#define CIO_GONE 0x0001 +/* No path to device. */ +#define CIO_NO_PATH 0x0002 +/* Device has appeared. */ +#define CIO_OPER 0x0004 +/* Sick revalidation of device. */ +#define CIO_REVALIDATE 0x0008 +/* Device did not respond in time. */ +#define CIO_BOXED 0x0010 + +/** + * struct ccw_dev_id - unique identifier for ccw devices + * @ssid: subchannel set id + * @devno: device number + * + * This structure is not directly based on any hardware structure. The + * hardware identifies a device by its device number and its subchannel, + * which is in turn identified by its id. In order to get a unique identifier + * for ccw devices across subchannel sets, @struct ccw_dev_id has been + * introduced.
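+ *
+ * For example, the ccw device with bus id 0.0.4711 is identified by
+ * ssid 0 and devno 0x4711.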
+ */ +struct ccw_dev_id { + u8 ssid; + u16 devno; +}; + +/** + * ccw_dev_id_is_equal() - compare two ccw_dev_ids + * @dev_id1: a ccw_dev_id + * @dev_id2: another ccw_dev_id + * Returns: + * %1 if the two structures are equal field-by-field, + * %0 if not. + * Context: + * any + */ +static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, + struct ccw_dev_id *dev_id2) +{ + if ((dev_id1->ssid == dev_id2->ssid) && + (dev_id1->devno == dev_id2->devno)) + return 1; + return 0; +} + +/** + * pathmask_to_pos() - find the position of the left-most bit in a pathmask + * @mask: pathmask with at least one bit set + */ +static inline u8 pathmask_to_pos(u8 mask) +{ + return 8 - ffs(mask); +} + +extern void css_schedule_reprobe(void); + +extern void *cio_dma_zalloc(size_t size); +extern void cio_dma_free(void *cpu_addr, size_t size); +extern struct device *cio_get_dma_css_dev(void); + +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size); +void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size); +void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); +struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); + +/* Function from drivers/s390/cio/chsc.c */ +int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta); +int chsc_sstpi(void *page, void *result, size_t size); +int chsc_stzi(void *page, void *result, size_t size); +int chsc_sgib(u32 origin); +int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid); + +#endif diff --git a/arch/s390/include/asm/clocksource.h b/arch/s390/include/asm/clocksource.h new file mode 100644 index 000000000..03434369f --- /dev/null +++ b/arch/s390/include/asm/clocksource.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* s390-specific clocksource additions */ + +#ifndef _ASM_S390_CLOCKSOURCE_H +#define _ASM_S390_CLOCKSOURCE_H + +#endif /* _ASM_S390_CLOCKSOURCE_H */ diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h new file mode 100644 index 000000000..10919eeb7 --- /dev/null +++ b/arch/s390/include/asm/clp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_CLP_H +#define _ASM_S390_CLP_H + +/* CLP common request & response block size */ +#define CLP_BLK_SIZE PAGE_SIZE + +/* Call Logical Processor - Command Code */ +#define CLP_SLPC 0x0001 + +#define CLP_LPS_BASE 0 +#define CLP_LPS_PCI 2 + +struct clp_req_hdr { + u16 len; + u16 cmd; + u32 fmt : 4; + u32 reserved1 : 28; + u64 reserved2; +} __packed; + +struct clp_rsp_hdr { + u16 len; + u16 rsp; + u32 fmt : 4; + u32 reserved1 : 28; + u64 reserved2; +} __packed; + +/* CLP Response Codes */ +#define CLP_RC_OK 0x0010 /* Command request successfully */ +#define CLP_RC_CMD 0x0020 /* Command code not recognized */ +#define CLP_RC_PERM 0x0030 /* Command not authorized */ +#define CLP_RC_FMT 0x0040 /* Invalid command request format */ +#define CLP_RC_LEN 0x0050 /* Invalid command request length */ +#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */ +#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */ +#define CLP_RC_NODATA 0x0080 /* No data available */ +#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */ + +/* Store logical-processor characteristics request */ +struct clp_req_slpc { + struct clp_req_hdr hdr; +} __packed; + +struct clp_rsp_slpc { + struct clp_rsp_hdr hdr; + u32 reserved2[4]; + u32 lpif[8]; + u32 reserved3[8]; + u32 lpic[8]; +} __packed; + +struct clp_req_rsp_slpc { + struct clp_req_slpc request; + struct clp_rsp_slpc response; +} 
__packed; + +#endif diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h new file mode 100644 index 000000000..599594c37 --- /dev/null +++ b/arch/s390/include/asm/cmb.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_CMB_H +#define S390_CMB_H + +#include + +struct ccw_device; +extern int enable_cmf(struct ccw_device *cdev); +extern int disable_cmf(struct ccw_device *cdev); +extern int __disable_cmf(struct ccw_device *cdev); +extern u64 cmf_read(struct ccw_device *cdev, int index); +extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data); + +#endif /* S390_CMB_H */ diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h new file mode 100644 index 000000000..aae031537 --- /dev/null +++ b/arch/s390/include/asm/cmpxchg.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2011 + * + * Author(s): Martin Schwidefsky , + */ + +#ifndef __ASM_CMPXCHG_H +#define __ASM_CMPXCHG_H + +#include +#include +#include + +void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__arch_xchg(unsigned long x, unsigned long address, int size) +{ + unsigned long old; + int shift; + + switch (size) { + case 1: + shift = (3 ^ (address & 3)) << 3; + address ^= address & 3; + asm volatile( + " l %0,%1\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)) + : "memory", "cc", "0"); + return old >> shift; + case 2: + shift = (2 ^ (address & 2)) << 3; + address ^= address & 2; + asm volatile( + " l %0,%1\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)) + : "memory", "cc", "0"); + return old >> shift; + case 4: + asm volatile( + " l %0,%1\n" + "0: cs %0,%2,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" (x) + : "memory", "cc"); + return old; + case 8: + asm volatile( + " lg %0,%1\n" + "0: csg %0,%2,%1\n" + " jl 0b\n" + : "=&d" (old), "+QS" (*(long *) address) + : "d" (x) + : "memory", "cc"); + return old; + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + \ + __ret = (__typeof__(*(ptr))) \ + __arch_xchg((unsigned long)(x), (unsigned long)(ptr), \ + sizeof(*(ptr))); \ + __ret; \ +}) + +void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long __cmpxchg(unsigned long address, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: { + unsigned int prev, shift, mask; + + shift = (3 ^ (address & 3)) << 3; + address ^= address & 3; + old = (old & 0xff) << shift; + new = (new & 0xff) << shift; + mask = ~(0xff << shift); + asm volatile( + " l %[prev],%[address]\n" + " nr %[prev],%[mask]\n" + " xilf %[mask],0xffffffff\n" + " or %[new],%[prev]\n" + " or %[prev],%[tmp]\n" + "0: lr %[tmp],%[prev]\n" + " cs %[prev],%[new],%[address]\n" + " jnl 1f\n" + " xr %[tmp],%[prev]\n" + " xr %[new],%[tmp]\n" + " nr %[tmp],%[mask]\n" + " jz 0b\n" + "1:" + : [prev] "=&d" (prev), + [address] "+Q" (*(int *)address), + [tmp] "+&d" (old), + [new] "+&d" (new), + [mask] "+&d" (mask) + :: "memory", "cc"); + return prev >> shift; + } + case 2: { + unsigned int prev, shift, mask; + + shift = (2 ^ (address & 2)) << 3; + address ^= address & 2; + old = (old & 0xffff) << shift; + new = (new & 0xffff) << shift; + mask = 
~(0xffff << shift); + asm volatile( + " l %[prev],%[address]\n" + " nr %[prev],%[mask]\n" + " xilf %[mask],0xffffffff\n" + " or %[new],%[prev]\n" + " or %[prev],%[tmp]\n" + "0: lr %[tmp],%[prev]\n" + " cs %[prev],%[new],%[address]\n" + " jnl 1f\n" + " xr %[tmp],%[prev]\n" + " xr %[new],%[tmp]\n" + " nr %[tmp],%[mask]\n" + " jz 0b\n" + "1:" + : [prev] "=&d" (prev), + [address] "+Q" (*(int *)address), + [tmp] "+&d" (old), + [new] "+&d" (new), + [mask] "+&d" (mask) + :: "memory", "cc"); + return prev >> shift; + } + case 4: { + unsigned int prev = old; + + asm volatile( + " cs %[prev],%[new],%[address]\n" + : [prev] "+&d" (prev), + [address] "+Q" (*(int *)address) + : [new] "d" (new) + : "memory", "cc"); + return prev; + } + case 8: { + unsigned long prev = old; + + asm volatile( + " csg %[prev],%[new],%[address]\n" + : [prev] "+&d" (prev), + [address] "+QS" (*(long *)address) + : [new] "d" (new) + : "memory", "cc"); + return prev; + } + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + __ret; \ +}) + +#define arch_cmpxchg64 arch_cmpxchg +#define arch_cmpxchg_local arch_cmpxchg +#define arch_cmpxchg64_local arch_cmpxchg + +#define system_has_cmpxchg128() 1 + +static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new) +{ + asm volatile( + " cdsg %[old],%[new],%[ptr]\n" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "memory", "cc"); + return old; +} + +#define arch_cmpxchg128 arch_cmpxchg128 + +#endif /* __ASM_CMPXCHG_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h new file mode 100644 index 000000000..3cb9d813f --- /dev/null +++ b/arch/s390/include/asm/compat.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390X_COMPAT_H +#define _ASM_S390X_COMPAT_H +/* + * Architecture specific compatibility types + */ +#include +#include +#include +#include +#include + +#define compat_mode_t compat_mode_t +typedef u16 compat_mode_t; + +#define __compat_uid_t __compat_uid_t +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; + +#define compat_dev_t compat_dev_t +typedef u16 compat_dev_t; + +#define compat_ipc_pid_t compat_ipc_pid_t +typedef u16 compat_ipc_pid_t; + +#define compat_statfs compat_statfs + +#include + +#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \ + typeof(0?(__force t)0:0ULL), u64)) + +#define __SC_DELOUSE(t,v) ({ \ + BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \ + (__force t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)); \ +}) + +#define PSW32_MASK_USER 0x0000FF00UL + +#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ + PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ + PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ + PSW32_ASC_PRIMARY) + +#define COMPAT_UTS_MACHINE "s390\0\0\0\0" + +typedef u16 compat_nlink_t; + +typedef struct { + u32 mask; + u32 addr; +} __aligned(8) psw_compat_t; + +typedef struct { + psw_compat_t psw; + u32 gprs[NUM_GPRS]; + u32 acrs[NUM_ACRS]; + u32 orig_gpr2; +} s390_compat_regs; + +typedef struct { + u32 gprs_high[NUM_GPRS]; +} s390_compat_regs_high; + +struct compat_stat { + compat_dev_t st_dev; + u16 __pad1; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + u16 __pad2; + u32 st_size; + u32 st_blksize; + u32 st_blocks; + u32 st_atime; + u32 st_atime_nsec; + u32 st_mtime; + u32 st_mtime_nsec; + u32 st_ctime; + u32 st_ctime_nsec; + u32 __unused4; + u32 __unused5; +}; + +struct compat_statfs { + u32 f_type; + u32 f_bsize; + u32 f_blocks; + u32 f_bfree; + u32 f_bavail; + u32 f_files; + u32 f_ffree; + compat_fsid_t f_fsid; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[4]; +}; + +struct compat_statfs64 { + u32 f_type; + u32 f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + compat_fsid_t f_fsid; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[5]; +}; + +/* + * A pointer passed in from user mode. This should not + * be used for syscall parameters, just declare them + * as pointers because the syscall entry code will have + * appropriately converted them already. + */ + +static inline void __user *compat_ptr(compat_uptr_t uptr) +{ + return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); +} +#define compat_ptr(uptr) compat_ptr(uptr) + +#ifdef CONFIG_COMPAT + +static inline int is_compat_task(void) +{ + return test_thread_flag(TIF_31BIT); +} + +#endif + +#endif /* _ASM_S390X_COMPAT_H */ diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h new file mode 100644 index 000000000..b378e2b57 --- /dev/null +++ b/arch/s390/include/asm/cpacf.h @@ -0,0 +1,551 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CP Assist for Cryptographic Functions (CPACF) + * + * Copyright IBM Corp. 
2003, 2023 + * Author(s): Thomas Spatzier + * Jan Glauber + * Harald Freudenberger (freude@de.ibm.com) + * Martin Schwidefsky + */ +#ifndef _ASM_S390_CPACF_H +#define _ASM_S390_CPACF_H + +#include + +/* + * Instruction opcodes for the CPACF instructions + */ +#define CPACF_KMAC 0xb91e /* MSA */ +#define CPACF_KM 0xb92e /* MSA */ +#define CPACF_KMC 0xb92f /* MSA */ +#define CPACF_KIMD 0xb93e /* MSA */ +#define CPACF_KLMD 0xb93f /* MSA */ +#define CPACF_PCKMO 0xb928 /* MSA3 */ +#define CPACF_KMF 0xb92a /* MSA4 */ +#define CPACF_KMO 0xb92b /* MSA4 */ +#define CPACF_PCC 0xb92c /* MSA4 */ +#define CPACF_KMCTR 0xb92d /* MSA4 */ +#define CPACF_PRNO 0xb93c /* MSA5 */ +#define CPACF_KMA 0xb929 /* MSA8 */ +#define CPACF_KDSA 0xb93a /* MSA9 */ + +/* + * En/decryption modifier bits + */ +#define CPACF_ENCRYPT 0x00 +#define CPACF_DECRYPT 0x80 + +/* + * Function codes for the KM (CIPHER MESSAGE) instruction + */ +#define CPACF_KM_QUERY 0x00 +#define CPACF_KM_DEA 0x01 +#define CPACF_KM_TDEA_128 0x02 +#define CPACF_KM_TDEA_192 0x03 +#define CPACF_KM_AES_128 0x12 +#define CPACF_KM_AES_192 0x13 +#define CPACF_KM_AES_256 0x14 +#define CPACF_KM_PAES_128 0x1a +#define CPACF_KM_PAES_192 0x1b +#define CPACF_KM_PAES_256 0x1c +#define CPACF_KM_XTS_128 0x32 +#define CPACF_KM_XTS_256 0x34 +#define CPACF_KM_PXTS_128 0x3a +#define CPACF_KM_PXTS_256 0x3c + +/* + * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) + * instruction + */ +#define CPACF_KMC_QUERY 0x00 +#define CPACF_KMC_DEA 0x01 +#define CPACF_KMC_TDEA_128 0x02 +#define CPACF_KMC_TDEA_192 0x03 +#define CPACF_KMC_AES_128 0x12 +#define CPACF_KMC_AES_192 0x13 +#define CPACF_KMC_AES_256 0x14 +#define CPACF_KMC_PAES_128 0x1a +#define CPACF_KMC_PAES_192 0x1b +#define CPACF_KMC_PAES_256 0x1c +#define CPACF_KMC_PRNG 0x43 + +/* + * Function codes for the KMCTR (CIPHER MESSAGE WITH COUNTER) + * instruction + */ +#define CPACF_KMCTR_QUERY 0x00 +#define CPACF_KMCTR_DEA 0x01 +#define CPACF_KMCTR_TDEA_128 0x02 +#define CPACF_KMCTR_TDEA_192 0x03 +#define CPACF_KMCTR_AES_128 0x12 +#define CPACF_KMCTR_AES_192 0x13 +#define CPACF_KMCTR_AES_256 0x14 +#define CPACF_KMCTR_PAES_128 0x1a +#define CPACF_KMCTR_PAES_192 0x1b +#define CPACF_KMCTR_PAES_256 0x1c + +/* + * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) + * instruction + */ +#define CPACF_KIMD_QUERY 0x00 +#define CPACF_KIMD_SHA_1 0x01 +#define CPACF_KIMD_SHA_256 0x02 +#define CPACF_KIMD_SHA_512 0x03 +#define CPACF_KIMD_SHA3_224 0x20 +#define CPACF_KIMD_SHA3_256 0x21 +#define CPACF_KIMD_SHA3_384 0x22 +#define CPACF_KIMD_SHA3_512 0x23 +#define CPACF_KIMD_GHASH 0x41 + +/* + * Function codes for the KLMD (COMPUTE LAST MESSAGE DIGEST) + * instruction + */ +#define CPACF_KLMD_QUERY 0x00 +#define CPACF_KLMD_SHA_1 0x01 +#define CPACF_KLMD_SHA_256 0x02 +#define CPACF_KLMD_SHA_512 0x03 +#define CPACF_KLMD_SHA3_224 0x20 +#define CPACF_KLMD_SHA3_256 0x21 +#define CPACF_KLMD_SHA3_384 0x22 +#define CPACF_KLMD_SHA3_512 0x23 + +/* + * function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction + */ +#define CPACF_KMAC_QUERY 0x00 +#define CPACF_KMAC_DEA 0x01 +#define CPACF_KMAC_TDEA_128 0x02 +#define CPACF_KMAC_TDEA_192 0x03 + +/* + * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) + * instruction + */ +#define CPACF_PCKMO_QUERY 0x00 +#define CPACF_PCKMO_ENC_DES_KEY 0x01 +#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 +#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 +#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 +#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 +#define 
CPACF_PCKMO_ENC_AES_256_KEY 0x14 +#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 +#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 +#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 +#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 +#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 + +/* + * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION) + * instruction + */ +#define CPACF_PRNO_QUERY 0x00 +#define CPACF_PRNO_SHA512_DRNG_GEN 0x03 +#define CPACF_PRNO_SHA512_DRNG_SEED 0x83 +#define CPACF_PRNO_TRNG_Q_R2C_RATIO 0x70 +#define CPACF_PRNO_TRNG 0x72 + +/* + * Function codes for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) + * instruction + */ +#define CPACF_KMA_QUERY 0x00 +#define CPACF_KMA_GCM_AES_128 0x12 +#define CPACF_KMA_GCM_AES_192 0x13 +#define CPACF_KMA_GCM_AES_256 0x14 + +/* + * Flags for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) instruction + */ +#define CPACF_KMA_LPC 0x100 /* Last-Plaintext/Ciphertext */ +#define CPACF_KMA_LAAD 0x200 /* Last-AAD */ +#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */ + +typedef struct { unsigned char bytes[16]; } cpacf_mask_t; + +/** + * cpacf_query() - check if a specific CPACF function is available + * @opcode: the opcode of the crypto instruction + * @func: the function code to test for + * + * Executes the query function for the given crypto instruction @opcode + * and checks if @func is available + * + * Returns 1 if @func is available for @opcode, 0 otherwise + */ +static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask) +{ + asm volatile( + " lghi 0,0\n" /* query function */ + " lgr 1,%[mask]\n" + " spm 0\n" /* pckmo doesn't change the cc */ + /* Parameter regs are ignored, but must be nonzero and unique */ + "0: .insn rrf,%[opc] << 16,2,4,6,0\n" + " brc 1,0b\n" /* handle partial completion */ + : "=m" (*mask) + : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode) + : "cc", "0", "1"); +} + +static __always_inline int __cpacf_check_opcode(unsigned int opcode) +{ + switch (opcode) { + case CPACF_KMAC: + case CPACF_KM: + case CPACF_KMC: + case CPACF_KIMD: + case CPACF_KLMD: + return test_facility(17); /* check for MSA */ + case CPACF_PCKMO: + return test_facility(76); /* check for MSA3 */ + case CPACF_KMF: + case CPACF_KMO: + case CPACF_PCC: + case CPACF_KMCTR: + return test_facility(77); /* check for MSA4 */ + case CPACF_PRNO: + return test_facility(57); /* check for MSA5 */ + case CPACF_KMA: + return test_facility(146); /* check for MSA8 */ + default: + BUG(); + } +} + +static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask) +{ + if (__cpacf_check_opcode(opcode)) { + __cpacf_query(opcode, mask); + return 1; + } + memset(mask, 0, sizeof(*mask)); + return 0; +} + +static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func) +{ + return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; +} + +static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func) +{ + cpacf_mask_t mask; + + if (cpacf_query(opcode, &mask)) + return cpacf_test_func(&mask, func); + return 0; +} + +/** + * cpacf_km() - executes the KM (CIPHER MESSAGE) instruction + * @func: the function code passed to KM; see CPACF_KM_xxx defines + * @param: address of parameter block; see POP for details on each func + * @dest: address of destination memory area + * @src: address of source memory area + * @src_len: length of src operand in bytes + * + * Returns 0 for the query func, number of processed bytes for + * encryption/decryption funcs + */ +static inline int cpacf_km(unsigned long func, void *param, + u8 *dest, 
const u8 *src, long src_len)
+{
+	union register_pair d, s;
+
+	d.even = (unsigned long)dest;
+	s.even = (unsigned long)src;
+	s.odd = (unsigned long)src_len;
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rre,%[opc] << 16,%[dst],%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_KM)
+		: "cc", "memory", "0", "1");
+
+	return src_len - s.odd;
+}
+
+/**
+ * cpacf_kmc() - executes the KMC (CIPHER MESSAGE WITH CHAINING) instruction
+ * @func: the function code passed to KMC; see CPACF_KMC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for
+ * encryption/decryption funcs
+ */
+static inline int cpacf_kmc(unsigned long func, void *param,
+			    u8 *dest, const u8 *src, long src_len)
+{
+	union register_pair d, s;
+
+	d.even = (unsigned long)dest;
+	s.even = (unsigned long)src;
+	s.odd = (unsigned long)src_len;
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rre,%[opc] << 16,%[dst],%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_KMC)
+		: "cc", "memory", "0", "1");
+
+	return src_len - s.odd;
+}
+
+/**
+ * cpacf_kimd() - executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ *		  instruction
+ * @func: the function code passed to KIMD; see CPACF_KIMD_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ */
+static inline void cpacf_kimd(unsigned long func, void *param,
+			      const u8 *src, long src_len)
+{
+	union register_pair s;
+
+	s.even = (unsigned long)src;
+	s.odd = (unsigned long)src_len;
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rre,%[opc] << 16,0,%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+&d" (s.pair)
+		: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
+		  [opc] "i" (CPACF_KIMD)
+		: "cc", "memory", "0", "1");
+}
+
+/**
+ * cpacf_klmd() - executes the KLMD (COMPUTE LAST MESSAGE DIGEST) instruction
+ * @func: the function code passed to KLMD; see CPACF_KLMD_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ */
+static inline void cpacf_klmd(unsigned long func, void *param,
+			      const u8 *src, long src_len)
+{
+	union register_pair s;
+
+	s.even = (unsigned long)src;
+	s.odd = (unsigned long)src_len;
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rre,%[opc] << 16,0,%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+&d" (s.pair)
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_KLMD)
+		: "cc", "memory", "0", "1");
+}
+
+/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ *		  instruction
+ * @func: the function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline
int cpacf_kmac(unsigned long func, void *param, + const u8 *src, long src_len) +{ + union register_pair s; + + s.even = (unsigned long)src; + s.odd = (unsigned long)src_len; + asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[pba]\n" + "0: .insn rre,%[opc] << 16,0,%[src]\n" + " brc 1,0b\n" /* handle partial completion */ + : [src] "+&d" (s.pair) + : [fc] "d" (func), [pba] "d" ((unsigned long)param), + [opc] "i" (CPACF_KMAC) + : "cc", "memory", "0", "1"); + + return src_len - s.odd; +} + +/** + * cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction + * @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines + * @param: address of parameter block; see POP for details on each func + * @dest: address of destination memory area + * @src: address of source memory area + * @src_len: length of src operand in bytes + * @counter: address of counter value + * + * Returns 0 for the query func, number of processed bytes for + * encryption/decryption funcs + */ +static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest, + const u8 *src, long src_len, u8 *counter) +{ + union register_pair d, s, c; + + d.even = (unsigned long)dest; + s.even = (unsigned long)src; + s.odd = (unsigned long)src_len; + c.even = (unsigned long)counter; + asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[pba]\n" + "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n" + " brc 1,0b\n" /* handle partial completion */ + : [src] "+&d" (s.pair), [dst] "+&d" (d.pair), + [ctr] "+&d" (c.pair) + : [fc] "d" (func), [pba] "d" ((unsigned long)param), + [opc] "i" (CPACF_KMCTR) + : "cc", "memory", "0", "1"); + + return src_len - s.odd; +} + +/** + * cpacf_prno() - executes the PRNO (PERFORM RANDOM NUMBER OPERATION) + * instruction + * @func: the function code passed to PRNO; see CPACF_PRNO_xxx defines + * @param: address of parameter block; see POP for details on each func + * @dest: address of destination memory area + * @dest_len: size of destination memory area in bytes + * @seed: address of seed data + * @seed_len: size of seed data in bytes + */ +static inline void cpacf_prno(unsigned long func, void *param, + u8 *dest, unsigned long dest_len, + const u8 *seed, unsigned long seed_len) +{ + union register_pair d, s; + + d.even = (unsigned long)dest; + d.odd = (unsigned long)dest_len; + s.even = (unsigned long)seed; + s.odd = (unsigned long)seed_len; + asm volatile ( + " lgr 0,%[fc]\n" + " lgr 1,%[pba]\n" + "0: .insn rre,%[opc] << 16,%[dst],%[seed]\n" + " brc 1,0b\n" /* handle partial completion */ + : [dst] "+&d" (d.pair) + : [fc] "d" (func), [pba] "d" ((unsigned long)param), + [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO) + : "cc", "memory", "0", "1"); +} + +/** + * cpacf_trng() - executes the TRNG subfunction of the PRNO instruction + * @ucbuf: buffer for unconditioned data + * @ucbuf_len: amount of unconditioned data to fetch in bytes + * @cbuf: buffer for conditioned data + * @cbuf_len: amount of conditioned data to fetch in bytes + */ +static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len, + u8 *cbuf, unsigned long cbuf_len) +{ + union register_pair u, c; + + u.even = (unsigned long)ucbuf; + u.odd = (unsigned long)ucbuf_len; + c.even = (unsigned long)cbuf; + c.odd = (unsigned long)cbuf_len; + asm volatile ( + " lghi 0,%[fc]\n" + "0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n" + " brc 1,0b\n" /* handle partial completion */ + : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair) + : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO) + : "cc", "memory", "0"); +} + +/** + * cpacf_pcc() - 
executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION)
+ *		 instruction
+ * @func: the function code passed to PCC; see CPACF_KM_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ */
+static inline void cpacf_pcc(unsigned long func, void *param)
+{
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rre,%[opc] << 16,0,0\n" /* PCC opcode */
+		"	brc	1,0b\n" /* handle partial completion */
+		:
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_PCC)
+		: "cc", "memory", "0", "1");
+}
+
+/**
+ * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
+ *		   MANAGEMENT) instruction
+ * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Returns 0.
+ */
+static inline void cpacf_pckmo(long func, void *param)
+{
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"	.insn	rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
+		:
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_PCKMO)
+		: "cc", "memory", "0", "1");
+}
+
+/**
+ * cpacf_kma() - executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ *		 instruction
+ * @func: the function code passed to KMA; see CPACF_KMA_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of additional authenticated data memory area
+ * @aad_len: length of aad operand in bytes
+ */
+static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
+			     const u8 *src, unsigned long src_len,
+			     const u8 *aad, unsigned long aad_len)
+{
+	union register_pair d, s, a;
+
+	d.even = (unsigned long)dest;
+	s.even = (unsigned long)src;
+	s.odd = (unsigned long)src_len;
+	a.even = (unsigned long)aad;
+	a.odd = (unsigned long)aad_len;
+	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[pba]\n"
+		"0:	.insn	rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
+		  [aad] "+&d" (a.pair)
+		: [fc] "d" (func), [pba] "d" ((unsigned long)param),
+		  [opc] "i" (CPACF_KMA)
+		: "cc", "memory", "0", "1");
+}
+
+#endif	/* _ASM_S390_CPACF_H */
diff --git a/arch/s390/include/asm/cpcmd.h b/arch/s390/include/asm/cpcmd.h
new file mode 100644
index 000000000..c3c993abe
--- /dev/null
+++ b/arch/s390/include/asm/cpcmd.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *            Christian Borntraeger (cborntra@de.ibm.com),
+ */
+
+#ifndef _ASM_S390_CPCMD_H
+#define _ASM_S390_CPCMD_H
+
+/*
+ * the lowlevel function for cpcmd
+ */
+int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
+
+/*
+ * cpcmd is the in-kernel interface for issuing CP commands
+ *
+ * cmd:		null-terminated command string, max 240 characters
+ * response:	response buffer for VM's textual response
+ * rlen:	size of the response buffer, cpcmd will not exceed this size
+ *		but will cap the output, if it's too large. Everything that
+ *		did not fit into the buffer will be silently dropped
+ * response_code: return pointer for VM's error code
+ * return value: the size of the response.
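+ *		(editor's note, an illustrative sketch that is not part of
+ *		 the original header: a hypothetical caller could issue the
+ *		 CP command "QUERY USERID" like this:
+ *
+ *			char buf[240];
+ *			int rc, len;
+ *
+ *			len = cpcmd("QUERY USERID", buf, sizeof(buf), &rc);
+ *
+ *		 afterwards buf holds VM's response text and rc holds CP's
+ *		 error code.)
+ *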
The caller can check if the buffer + * was large enough by comparing the return value and rlen + * NOTE: If the response buffer is not in real storage, cpcmd can sleep + */ +int cpcmd(const char *cmd, char *response, int rlen, int *response_code); + +#endif /* _ASM_S390_CPCMD_H */ diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h new file mode 100644 index 000000000..26c710cd3 --- /dev/null +++ b/arch/s390/include/asm/cpu.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2000, 2009 + * Author(s): Hartmut Penner , + * Martin Schwidefsky , + * Christian Ehrhardt , + */ + +#ifndef _ASM_S390_CPU_H +#define _ASM_S390_CPU_H + +#ifndef __ASSEMBLY__ + +#include +#include + +struct cpuid +{ + unsigned int version : 8; + unsigned int ident : 24; + unsigned int machine : 16; + unsigned int unused : 16; +} __attribute__ ((packed, aligned(8))); + +DECLARE_STATIC_KEY_FALSE(cpu_has_bear); + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_S390_CPU_H */ diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h new file mode 100644 index 000000000..a68b362e0 --- /dev/null +++ b/arch/s390/include/asm/cpu_mf-insn.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Support for CPU-MF instructions + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner + */ +#ifndef _ASM_S390_CPU_MF_INSN_H +#define _ASM_S390_CPU_MF_INSN_H + +#ifdef __ASSEMBLY__ + +/* Macro to generate the STCCTM instruction with a customized + * M3 field designating the counter set. + */ +.macro STCCTM r1 m3 db2 + .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2 +.endm + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h new file mode 100644 index 000000000..a0de5b9b0 --- /dev/null +++ b/arch/s390/include/asm/cpu_mf.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPU-measurement facilities + * + * Copyright IBM Corp. 2012, 2018 + * Author(s): Hendrik Brueckner + * Jan Glauber + */ +#ifndef _ASM_S390_CPU_MF_H +#define _ASM_S390_CPU_MF_H + +#include +#include +#include + +asm(".include \"asm/cpu_mf-insn.h\"\n"); + +#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ +#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */ +#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */ +#define CPU_MF_INT_SF_SACA (1 << 23) /* sampler auth. change alert */ +#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ +#define CPU_MF_INT_CF_MTDA (1 << 15) /* loss of MT ctr. data alert */ +#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. 
change alert */ +#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */ +#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_MTDA|CPU_MF_INT_CF_CACA| \ + CPU_MF_INT_CF_LCDA) +#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \ + CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \ + CPU_MF_INT_SF_LSDA) + +#define CPU_MF_SF_RIBM_NOTAV 0x1 /* Sampling unavailable */ + +/* CPU measurement facility support */ +static inline int cpum_cf_avail(void) +{ + return test_facility(40) && test_facility(67); +} + +static inline int cpum_sf_avail(void) +{ + return test_facility(40) && test_facility(68); +} + +struct cpumf_ctr_info { + u16 cfvn; + u16 auth_ctl; + u16 enable_ctl; + u16 act_ctl; + u16 max_cpu; + u16 csvn; + u16 max_cg; + u16 reserved1; + u32 reserved2[12]; +} __packed; + +/* QUERY SAMPLING INFORMATION block */ +struct hws_qsi_info_block { /* Bit(s) */ + unsigned int b0_13:14; /* 0-13: zeros */ + unsigned int as:1; /* 14: basic-sampling authorization */ + unsigned int ad:1; /* 15: diag-sampling authorization */ + unsigned int b16_21:6; /* 16-21: zeros */ + unsigned int es:1; /* 22: basic-sampling enable control */ + unsigned int ed:1; /* 23: diag-sampling enable control */ + unsigned int b24_29:6; /* 24-29: zeros */ + unsigned int cs:1; /* 30: basic-sampling activation control */ + unsigned int cd:1; /* 31: diag-sampling activation control */ + unsigned int bsdes:16; /* 4-5: size of basic sampling entry */ + unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */ + unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ + unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ + unsigned long tear; /* 24-31: TEAR contents */ + unsigned long dear; /* 32-39: DEAR contents */ + unsigned int rsvrd0:24; /* 40-42: reserved */ + unsigned int ribm:8; /* 43: Reserved by IBM */ + unsigned int cpu_speed; /* 44-47: CPU speed */ + unsigned long long rsvrd1; /* 48-55: reserved */ + unsigned long long rsvrd2; /* 56-63: reserved */ +} __packed; + +/* SET SAMPLING CONTROLS request block */ +struct hws_lsctl_request_block { + unsigned int s:1; /* 0: maximum buffer indicator */ + unsigned int h:1; /* 1: part. level reserved for VM use*/ + unsigned long long b2_53:52;/* 2-53: zeros */ + unsigned int es:1; /* 54: basic-sampling enable control */ + unsigned int ed:1; /* 55: diag-sampling enable control */ + unsigned int b56_61:6; /* 56-61: - zeros */ + unsigned int cs:1; /* 62: basic-sampling activation control */ + unsigned int cd:1; /* 63: diag-sampling activation control */ + unsigned long interval; /* 8-15: sampling interval */ + unsigned long tear; /* 16-23: TEAR contents */ + unsigned long dear; /* 24-31: DEAR contents */ + /* 32-63: */ + unsigned long rsvrd1; /* reserved */ + unsigned long rsvrd2; /* reserved */ + unsigned long rsvrd3; /* reserved */ + unsigned long rsvrd4; /* reserved */ +} __packed; + +struct hws_basic_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:4; /* 16-19 reserved */ + unsigned int U:4; /* 20-23 Number of unique instruct. 
*/ + unsigned int z:2; /* zeros */ + unsigned int T:1; /* 26 PSW DAT mode */ + unsigned int W:1; /* 27 PSW wait state */ + unsigned int P:1; /* 28 PSW Problem state */ + unsigned int AS:2; /* 29-30 PSW address-space control */ + unsigned int I:1; /* 31 entry valid or invalid */ + unsigned int CL:2; /* 32-33 Configuration Level */ + unsigned int H:1; /* 34 Host Indicator */ + unsigned int LS:1; /* 35 Limited Sampling */ + unsigned int:12; + unsigned int prim_asn:16; /* primary ASN */ + unsigned long long ia; /* Instruction Address */ + unsigned long long gpp; /* Guest Program Parameter */ + unsigned long long hpp; /* Host Program Parameter */ +} __packed; + +struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ +} __packed; + +struct hws_combined_entry { + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ +} __packed; + +union hws_trailer_header { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ + unsigned long long overflow; /* 64 - Overflow Count */ + }; + u128 val; +}; + +struct hws_trailer_entry { + union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count */ + unsigned char timestamp[16]; /* 16 - 31 timestamp */ + unsigned long long reserved1; /* 32 -Reserved */ + unsigned long long reserved2; /* */ + union { /* 48 - reserved for programming use */ + struct { + unsigned int clock_base:1; /* in progusage2 */ + unsigned long long progusage1:63; + unsigned long long progusage2; + }; + unsigned long long progusage[2]; + }; +} __packed; + +/* Load program parameter */ +static inline void lpp(void *pp) +{ + asm volatile("lpp 0(%0)\n" :: "a" (pp) : "memory"); +} + +/* Query counter information */ +static inline int qctri(struct cpumf_ctr_info *info) +{ + int rc = -EINVAL; + + asm volatile ( + "0: qctri %1\n" + "1: lhi %0,0\n" + "2:\n" + EX_TABLE(1b, 2b) + : "+d" (rc), "=Q" (*info)); + return rc; +} + +/* Load CPU-counter-set controls */ +static inline int lcctl(u64 ctl) +{ + int cc; + + asm volatile ( + " lcctl %1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) : "Q" (ctl) : "cc"); + return cc; +} + +/* Extract CPU counter */ +static inline int __ecctr(u64 ctr, u64 *content) +{ + u64 _content; + int cc; + + asm volatile ( + " ecctr %0,%2\n" + " ipm %1\n" + " srl %1,28\n" + : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc"); + *content = _content; + return cc; +} + +/* Extract CPU counter */ +static inline int ecctr(u64 ctr, u64 *val) +{ + u64 content; + int cc; + + cc = __ecctr(ctr, &content); + if (!cc) + *val = content; + return cc; +} + +/* Store CPU counter multiple for a particular counter set */ +enum stcctm_ctr_set { + EXTENDED = 0, + BASIC = 1, + PROBLEM_STATE = 2, + CRYPTO_ACTIVITY = 3, + MT_DIAG = 5, + MT_DIAG_CLEARING = 9, /* clears loss-of-MT-ctr-data alert */ +}; + +static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest) +{ + int cc; + + asm volatile ( + " STCCTM %2,%3,%1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "Q" (*dest), "d" (range), "i" (set) + : "cc", "memory"); + return cc; +} + +/* Query sampling information */ 
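+/*
+ * Editor's note: an illustrative sketch, not part of the original header.
+ * A typical user of the sampling facility queries its properties via qsi()
+ * below and checks the authorization bits before enabling anything, e.g.:
+ *
+ *	struct hws_qsi_info_block si;
+ *
+ *	memset(&si, 0, sizeof(si));
+ *	if (!qsi(&si) && si.as)
+ *		pr_info("basic sampling: entry size %u, min interval %lu\n",
+ *			si.bsdes, si.min_sampl_rate);
+ */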
+static inline int qsi(struct hws_qsi_info_block *info) +{ + int cc = 1; + + asm volatile( + "0: qsi %1\n" + "1: lhi %0,0\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (cc), "+Q" (*info)); + return cc ? -EINVAL : 0; +} + +/* Load sampling controls */ +static inline int lsctl(struct hws_lsctl_request_block *req) +{ + int cc; + + cc = 1; + asm volatile( + "0: lsctl 0(%1)\n" + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (cc), "+a" (req) + : "m" (*req) + : "cc", "memory"); + + return cc ? -EINVAL : 0; +} +#endif /* _ASM_S390_CPU_MF_H */ diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h new file mode 100644 index 000000000..931204613 --- /dev/null +++ b/arch/s390/include/asm/cpufeature.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Module interface for CPU features + * + * Copyright IBM Corp. 2015, 2022 + * Author(s): Hendrik Brueckner + */ + +#ifndef __ASM_S390_CPUFEATURE_H +#define __ASM_S390_CPUFEATURE_H + +enum { + S390_CPU_FEATURE_MSA, + S390_CPU_FEATURE_VXRS, + S390_CPU_FEATURE_UV, + MAX_CPU_FEATURES +}; + +#define cpu_feature(feature) (feature) + +int cpu_have_feature(unsigned int nr); + +#endif /* __ASM_S390_CPUFEATURE_H */ diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h new file mode 100644 index 000000000..30bb3ec4e --- /dev/null +++ b/arch/s390/include/asm/cputime.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2004 + * + * Author: Martin Schwidefsky + */ + +#ifndef _S390_CPUTIME_H +#define _S390_CPUTIME_H + +#include +#include + +/* + * Convert cputime to nanoseconds. + */ +#define cputime_to_nsecs(cputime) tod_to_ns(cputime) + +void account_idle_time_irq(void); + +#endif /* _S390_CPUTIME_H */ diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h new file mode 100644 index 000000000..97456d98f --- /dev/null +++ b/arch/s390/include/asm/crw.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Data definitions for channel report processing + * Copyright IBM Corp. 
2000, 2009 + * Author(s): Ingo Adlung , + * Martin Schwidefsky , + * Cornelia Huck , + */ + +#ifndef _ASM_S390_CRW_H +#define _ASM_S390_CRW_H + +#include + +/* + * Channel Report Word + */ +struct crw { + __u32 res1 : 1; /* reserved zero */ + __u32 slct : 1; /* solicited */ + __u32 oflw : 1; /* overflow */ + __u32 chn : 1; /* chained */ + __u32 rsc : 4; /* reporting source code */ + __u32 anc : 1; /* ancillary report */ + __u32 res2 : 1; /* reserved zero */ + __u32 erc : 6; /* error-recovery code */ + __u32 rsid : 16; /* reporting-source ID */ +} __attribute__ ((packed)); + +typedef void (*crw_handler_t)(struct crw *, struct crw *, int); + +extern int crw_register_handler(int rsc, crw_handler_t handler); +extern void crw_unregister_handler(int rsc); +extern void crw_handle_channel_report(void); +void crw_wait_for_channel_report(void); + +#define NR_RSCS 16 + +#define CRW_RSC_MONITOR 0x2 /* monitoring facility */ +#define CRW_RSC_SCH 0x3 /* subchannel */ +#define CRW_RSC_CPATH 0x4 /* channel path */ +#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */ +#define CRW_RSC_CSS 0xB /* channel subsystem */ + +#define CRW_ERC_EVENT 0x00 /* event information pending */ +#define CRW_ERC_AVAIL 0x01 /* available */ +#define CRW_ERC_INIT 0x02 /* initialized */ +#define CRW_ERC_TERROR 0x03 /* temporary error */ +#define CRW_ERC_IPARM 0x04 /* installed parm initialized */ +#define CRW_ERC_TERM 0x05 /* terminal */ +#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */ +#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */ +#define CRW_ERC_PMOD 0x08 /* installed parameters modified */ + +#endif /* _ASM_S390_CRW_H */ diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h new file mode 100644 index 000000000..638137d46 --- /dev/null +++ b/arch/s390/include/asm/css_chars.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_CSS_CHARS_H +#define _ASM_CSS_CHARS_H + +#include + +struct css_general_char { + u64 : 12; + u64 dynio : 1; /* bit 12 */ + u64 : 4; + u64 eadm : 1; /* bit 17 */ + u64 : 23; + u64 aif : 1; /* bit 41 */ + u64 : 3; + u64 mcss : 1; /* bit 45 */ + u64 fcs : 1; /* bit 46 */ + u64 : 1; + u64 ext_mb : 1; /* bit 48 */ + u64 : 7; + u64 aif_tdd : 1; /* bit 56 */ + u64 : 1; + u64 qebsm : 1; /* bit 58 */ + u64 : 2; + u64 aiv : 1; /* bit 61 */ + u64 : 2; + + u64 : 3; + u64 aif_osa : 1; /* bit 67 */ + u64 : 12; + u64 eadm_rf : 1; /* bit 80 */ + u64 : 1; + u64 cib : 1; /* bit 82 */ + u64 : 5; + u64 fcx : 1; /* bit 88 */ + u64 : 19; + u64 alt_ssi : 1; /* bit 108 */ + u64 : 1; + u64 narf : 1; /* bit 110 */ + u64 : 5; + u64 enarf: 1; /* bit 116 */ + u64 : 6; + u64 util_str : 1;/* bit 123 */ +} __packed; + +extern struct css_general_char css_general_characteristics; + +#endif diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h new file mode 100644 index 000000000..adf7d8cda --- /dev/null +++ b/arch/s390/include/asm/ctl_reg.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
1999, 2009 + * + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_CTL_REG_H +#define __ASM_CTL_REG_H + +#include + +#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10) +#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35) +#define CR0_FETCH_PROTECTION_OVERRIDE BIT(63 - 38) +#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(63 - 39) +#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49) +#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50) +#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52) +#define CR0_CPU_TIMER_SUBMASK BIT(63 - 53) +#define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54) +#define CR0_UNUSED_56 BIT(63 - 56) +#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57) +#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58) + +#define CR14_UNUSED_32 BIT(63 - 32) +#define CR14_UNUSED_33 BIT(63 - 33) +#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35) +#define CR14_RECOVERY_SUBMASK BIT(63 - 36) +#define CR14_DEGRADATION_SUBMASK BIT(63 - 37) +#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38) +#define CR14_WARNING_SUBMASK BIT(63 - 39) + +#ifndef __ASSEMBLY__ + +#include + +#define __ctl_load(array, low, high) do { \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + \ + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ + asm volatile( \ + " lctlg %1,%2,%0\n" \ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ +} while (0) + +#define __ctl_store(array, low, high) do { \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + \ + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ + asm volatile( \ + " stctg %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ +} while (0) + +static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit) +{ + unsigned long reg; + + __ctl_store(reg, cr, cr); + reg |= 1UL << bit; + __ctl_load(reg, cr, cr); +} + +static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) +{ + unsigned long reg; + + __ctl_store(reg, cr, cr); + reg &= ~(1UL << bit); + __ctl_load(reg, cr, cr); +} + +void smp_ctl_set_clear_bit(int cr, int bit, bool set); + +static inline void ctl_set_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, true); +} + +static inline void ctl_clear_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, false); +} + +union ctlreg0 { + unsigned long val; + struct { + unsigned long : 8; + unsigned long tcx : 1; /* Transactional-Execution control */ + unsigned long pifo : 1; /* Transactional-Execution Program- + Interruption-Filtering Override */ + unsigned long : 3; + unsigned long ccc : 1; /* Cryptography counter control */ + unsigned long pec : 1; /* PAI extension control */ + unsigned long : 17; + unsigned long : 3; + unsigned long lap : 1; /* Low-address-protection control */ + unsigned long : 4; + unsigned long edat : 1; /* Enhanced-DAT-enablement control */ + unsigned long : 2; + unsigned long iep : 1; /* Instruction-Execution-Protection */ + unsigned long : 1; + unsigned long afp : 1; /* AFP-register control */ + unsigned long vx : 1; /* Vector enablement control */ + unsigned long : 7; + unsigned long sssm : 1; /* Service signal subclass mask */ + unsigned long : 9; + }; +}; + +union ctlreg2 { + unsigned long val; + struct { + unsigned long : 33; + unsigned long ducto : 25; + unsigned long : 1; + unsigned long gse : 1; + unsigned long : 1; + unsigned long tds : 1; + unsigned long tdc : 2; + }; +}; + +union ctlreg5 { + unsigned long val; + struct { + unsigned long : 33; + unsigned long pasteo: 25; + unsigned long : 6; + }; +}; + +union ctlreg15 { + 
unsigned long val; + struct { + unsigned long lsea : 61; + unsigned long : 3; + }; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CTL_REG_H */ diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h new file mode 100644 index 000000000..68f843152 --- /dev/null +++ b/arch/s390/include/asm/current.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/current.h" + */ + +#ifndef _S390_CURRENT_H +#define _S390_CURRENT_H + +#include + +struct task_struct; + +#define current ((struct task_struct *const)S390_lowcore.current_task) + +#endif /* !(_S390_CURRENT_H) */ diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h new file mode 100644 index 000000000..ccd4e148b --- /dev/null +++ b/arch/s390/include/asm/debug.h @@ -0,0 +1,490 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S/390 debug facility + * + * Copyright IBM Corp. 1999, 2020 + */ +#ifndef _ASM_S390_DEBUG_H +#define _ASM_S390_DEBUG_H + +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ +#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */ +#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */ +#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ +#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */ +#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */ + +#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */ + +#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */ + /* the entry information */ + +#define __DEBUG_FEATURE_VERSION 3 /* version of debug feature */ + +struct __debug_entry { + unsigned long clock : 60; + unsigned long exception : 1; + unsigned long level : 3; + void *caller; + unsigned short cpu; +} __packed; + +typedef struct __debug_entry debug_entry_t; + +struct debug_view; + +typedef struct debug_info { + struct debug_info *next; + struct debug_info *prev; + refcount_t ref_count; + spinlock_t lock; + int level; + int nr_areas; + int pages_per_area; + int buf_size; + int entry_size; + debug_entry_t ***areas; + int active_area; + int *active_pages; + int *active_entries; + struct dentry *debugfs_root_entry; + struct dentry *debugfs_entries[DEBUG_MAX_VIEWS]; + struct debug_view *views[DEBUG_MAX_VIEWS]; + char name[DEBUG_MAX_NAME_LEN]; + umode_t mode; +} debug_info_t; + +typedef int (debug_header_proc_t) (debug_info_t *id, + struct debug_view *view, + int area, + debug_entry_t *entry, + char *out_buf); + +typedef int (debug_format_proc_t) (debug_info_t *id, + struct debug_view *view, char *out_buf, + const char *in_buf); +typedef int (debug_prolog_proc_t) (debug_info_t *id, + struct debug_view *view, + char *out_buf); +typedef int (debug_input_proc_t) (debug_info_t *id, + struct debug_view *view, + struct file *file, + const char __user *user_buf, + size_t in_buf_size, loff_t *offset); + +int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, + int area, debug_entry_t *entry, char *out_buf); + +struct debug_view { + char name[DEBUG_MAX_NAME_LEN]; + debug_prolog_proc_t *prolog_proc; + debug_header_proc_t *header_proc; + debug_format_proc_t *format_proc; + debug_input_proc_t *input_proc; + void *private_data; +}; + +extern struct debug_view debug_hex_ascii_view; +extern struct debug_view debug_sprintf_view; + +/* do NOT use the _common 
functions */ + +debug_entry_t *debug_event_common(debug_info_t *id, int level, + const void *data, int length); + +debug_entry_t *debug_exception_common(debug_info_t *id, int level, + const void *data, int length); + +/* Debug Feature API: */ + +debug_info_t *debug_register(const char *name, int pages, int nr_areas, + int buf_size); + +debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas, + int buf_size, umode_t mode, uid_t uid, + gid_t gid); + +void debug_unregister(debug_info_t *id); + +void debug_set_level(debug_info_t *id, int new_level); + +void debug_set_critical(void); + +void debug_stop_all(void); + +/** + * debug_level_enabled() - Returns true if debug events for the specified + * level would be logged. Otherwise returns false. + * + * @id: handle for debug log + * @level: debug level + * + * Return: + * - %true if level is less or equal to the current debug level. + */ +static inline bool debug_level_enabled(debug_info_t *id, int level) +{ + return level <= id->level; +} + +/** + * debug_event() - writes binary debug entry to active debug area + * (if level <= actual debug level) + * + * @id: handle for debug log + * @level: debug level + * @data: pointer to data for debug entry + * @length: length of data in bytes + * + * Return: + * - Address of written debug entry + * - %NULL if error + */ +static inline debug_entry_t *debug_event(debug_info_t *id, int level, + void *data, int length) +{ + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; + return debug_event_common(id, level, data, length); +} + +/** + * debug_int_event() - writes unsigned integer debug entry to active debug area + * (if level <= actual debug level) + * + * @id: handle for debug log + * @level: debug level + * @tag: integer value for debug entry + * + * Return: + * - Address of written debug entry + * - %NULL if error + */ +static inline debug_entry_t *debug_int_event(debug_info_t *id, int level, + unsigned int tag) +{ + unsigned int t = tag; + + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; + return debug_event_common(id, level, &t, sizeof(unsigned int)); +} + +/** + * debug_long_event() - writes unsigned long debug entry to active debug area + * (if level <= actual debug level) + * + * @id: handle for debug log + * @level: debug level + * @tag: long integer value for debug entry + * + * Return: + * - Address of written debug entry + * - %NULL if error + */ +static inline debug_entry_t *debug_long_event(debug_info_t *id, int level, + unsigned long tag) +{ + unsigned long t = tag; + + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; + return debug_event_common(id, level, &t, sizeof(unsigned long)); +} + +/** + * debug_text_event() - writes string debug entry in ascii format to active + * debug area (if level <= actual debug level) + * + * @id: handle for debug log + * @level: debug level + * @txt: string for debug entry + * + * Return: + * - Address of written debug entry + * - %NULL if error + */ +static inline debug_entry_t *debug_text_event(debug_info_t *id, int level, + const char *txt) +{ + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; + return debug_event_common(id, level, txt, strlen(txt)); +} + +/* + * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are + * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details! + */ +extern debug_entry_t * +__debug_sprintf_event(debug_info_t *id, int level, char *string, ...) 
+	__attribute__ ((format(printf, 3, 4)));
+
+/**
+ * debug_sprintf_event() - writes debug entry with format string
+ *			   and varargs (longs) to active debug area
+ *			   (if level <= actual debug level).
+ *
+ * @_id:	handle for debug log
+ * @_level:	debug level
+ * @_fmt:	format string for debug entry
+ * @...:	varargs used as in sprintf()
+ *
+ * Return:
+ * - Address of written debug entry
+ * - %NULL if error
+ *
+ * floats and long long datatypes cannot be used as varargs.
+ */
+#define debug_sprintf_event(_id, _level, _fmt, ...)			\
+({									\
+	debug_entry_t *__ret;						\
+	debug_info_t *__id = _id;					\
+	int __level = _level;						\
+									\
+	if ((!__id) || (__level > __id->level))				\
+		__ret = NULL;						\
+	else								\
+		__ret = __debug_sprintf_event(__id, __level,		\
+					      _fmt, ## __VA_ARGS__);	\
+	__ret;								\
+})
+
+/**
+ * debug_exception() - writes binary debug entry to active debug area
+ *		       (if level <= actual debug level)
+ *		       and switches to next debug area
+ *
+ * @id:		handle for debug log
+ * @level:	debug level
+ * @data:	pointer to data for debug entry
+ * @length:	length of data in bytes
+ *
+ * Return:
+ * - Address of written debug entry
+ * - %NULL if error
+ */
+static inline debug_entry_t *debug_exception(debug_info_t *id, int level,
+					     void *data, int length)
+{
+	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+		return NULL;
+	return debug_exception_common(id, level, data, length);
+}
+
+/**
+ * debug_int_exception() - writes unsigned int debug entry to active debug area
+ *			   (if level <= actual debug level)
+ *			   and switches to next debug area
+ *
+ * @id:		handle for debug log
+ * @level:	debug level
+ * @tag:	integer value for debug entry
+ *
+ * Return:
+ * - Address of written debug entry
+ * - %NULL if error
+ */
+static inline debug_entry_t *debug_int_exception(debug_info_t *id, int level,
+						 unsigned int tag)
+{
+	unsigned int t = tag;
+
+	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+		return NULL;
+	return debug_exception_common(id, level, &t, sizeof(unsigned int));
+}
+
+/**
+ * debug_long_exception() - writes long debug entry to active debug area
+ *			    (if level <= actual debug level)
+ *			    and switches to next debug area
+ *
+ * @id:		handle for debug log
+ * @level:	debug level
+ * @tag:	long integer value for debug entry
+ *
+ * Return:
+ * - Address of written debug entry
+ * - %NULL if error
+ */
+static inline debug_entry_t *debug_long_exception (debug_info_t *id, int level,
+						   unsigned long tag)
+{
+	unsigned long t = tag;
+
+	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+		return NULL;
+	return debug_exception_common(id, level, &t, sizeof(unsigned long));
+}
+
+/**
+ * debug_text_exception() - writes string debug entry in ascii format to active
+ *			    debug area (if level <= actual debug level)
+ *			    and switches to next debug area
+ *
+ * @id:		handle for debug log
+ * @level:	debug level
+ * @txt:	string for debug entry
+ *
+ * Return:
+ * - Address of written debug entry
+ * - %NULL if error
+ */
+static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
+						  const char *txt)
+{
+	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+		return NULL;
+	return debug_exception_common(id, level, txt, strlen(txt));
+}
+
+/*
+ * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
+ * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
+ */
+extern debug_entry_t *
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
+ __attribute__ ((format(printf, 3, 4))); + + +/** + * debug_sprintf_exception() - writes debug entry with format string and + * varargs (longs) to active debug area + * (if level <= actual debug level) + * and switches to next debug area. + * + * @_id: handle for debug log + * @_level: debug level + * @_fmt: format string for debug entry + * @...: varargs used as in sprintf() + * + * Return: + * - Address of written debug entry + * - %NULL if error + * + * floats and long long datatypes cannot be used as varargs. + */ +#define debug_sprintf_exception(_id, _level, _fmt, ...) \ +({ \ + debug_entry_t *__ret; \ + debug_info_t *__id = _id; \ + int __level = _level; \ + \ + if ((!__id) || (__level > __id->level)) \ + __ret = NULL; \ + else \ + __ret = __debug_sprintf_exception(__id, __level, \ + _fmt, ## __VA_ARGS__);\ + __ret; \ +}) + +int debug_register_view(debug_info_t *id, struct debug_view *view); + +int debug_unregister_view(debug_info_t *id, struct debug_view *view); + +#ifndef MODULE + +/* + * Note: Initial page and area numbers must be fixed to allow static + * initialization. This enables very early tracing. Changes to these values + * must be reflected in __DEFINE_STATIC_AREA. + */ +#define EARLY_PAGES 8 +#define EARLY_AREAS 1 + +#define VNAME(var, suffix) __##var##_##suffix + +/* + * Define static areas for early trace data. During boot debug_register_static() + * will replace these with dynamically allocated areas to allow custom page and + * area sizes, and dynamic resizing. + */ +#define __DEFINE_STATIC_AREA(var) \ +static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata; \ +static debug_entry_t *VNAME(var, pages)[EARLY_PAGES] __initdata = { \ + (debug_entry_t *)VNAME(var, data)[0], \ + (debug_entry_t *)VNAME(var, data)[1], \ + (debug_entry_t *)VNAME(var, data)[2], \ + (debug_entry_t *)VNAME(var, data)[3], \ + (debug_entry_t *)VNAME(var, data)[4], \ + (debug_entry_t *)VNAME(var, data)[5], \ + (debug_entry_t *)VNAME(var, data)[6], \ + (debug_entry_t *)VNAME(var, data)[7], \ +}; \ +static debug_entry_t **VNAME(var, areas)[EARLY_AREAS] __initdata = { \ + (debug_entry_t **)VNAME(var, pages), \ +}; \ +static int VNAME(var, active_pages)[EARLY_AREAS] __initdata; \ +static int VNAME(var, active_entries)[EARLY_AREAS] __initdata + +#define __DEBUG_INFO_INIT(var, _name, _buf_size) { \ + .next = NULL, \ + .prev = NULL, \ + .ref_count = REFCOUNT_INIT(1), \ + .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ + .level = DEBUG_DEFAULT_LEVEL, \ + .nr_areas = EARLY_AREAS, \ + .pages_per_area = EARLY_PAGES, \ + .buf_size = (_buf_size), \ + .entry_size = sizeof(debug_entry_t) + (_buf_size), \ + .areas = VNAME(var, areas), \ + .active_area = 0, \ + .active_pages = VNAME(var, active_pages), \ + .active_entries = VNAME(var, active_entries), \ + .debugfs_root_entry = NULL, \ + .debugfs_entries = { NULL }, \ + .views = { NULL }, \ + .name = (_name), \ + .mode = 0600, \ +} + +#define __REGISTER_STATIC_DEBUG_INFO(var, name, pages, areas, view) \ +static int __init VNAME(var, reg)(void) \ +{ \ + debug_register_static(&var, (pages), (areas)); \ + debug_register_view(&var, (view)); \ + return 0; \ +} \ +arch_initcall(VNAME(var, reg)) + +/** + * DEFINE_STATIC_DEBUG_INFO - Define static debug_info_t + * + * @var: Name of debug_info_t variable + * @name: Name of debug log (e.g. 
used for debugfs entry) + * @pages: Number of pages per area + * @nr_areas: Number of debug areas + * @buf_size: Size of data area in each debug entry + * @view: Pointer to debug view struct + * + * Define a static debug_info_t for early tracing. The associated debugfs log + * is automatically registered with the specified debug view. + * + * Important: Users of this macro must not call any of the + * debug_register/_unregister() functions for this debug_info_t! + * + * Note: Tracing will start with a fixed number of initial pages and areas. + * The debug area will be changed to use the specified numbers during + * arch_initcall. + */ +#define DEFINE_STATIC_DEBUG_INFO(var, name, pages, nr_areas, buf_size, view) \ +__DEFINE_STATIC_AREA(var); \ +static debug_info_t __refdata var = \ + __DEBUG_INFO_INIT(var, (name), (buf_size)); \ +__REGISTER_STATIC_DEBUG_INFO(var, name, pages, nr_areas, view) + +void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas); + +#endif /* MODULE */ + +#endif /* _ASM_S390_DEBUG_H */ diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h new file mode 100644 index 000000000..21a8fe18f --- /dev/null +++ b/arch/s390/include/asm/delay.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/delay.h" + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines calling functions in arch/s390/lib/delay.c + */ + +#ifndef _S390_DELAY_H +#define _S390_DELAY_H + +void __ndelay(unsigned long nsecs); +void __udelay(unsigned long usecs); +void __delay(unsigned long loops); + +#define ndelay(n) __ndelay((unsigned long)(n)) +#define udelay(n) __udelay((unsigned long)(n)) +#define mdelay(n) __udelay((unsigned long)(n) * 1000) + +#endif /* defined(_S390_DELAY_H) */ diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h new file mode 100644 index 000000000..bed804137 --- /dev/null +++ b/arch/s390/include/asm/diag.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * s390 diagnose functions + * + * Copyright IBM Corp. 
2007 + * Author(s): Michael Holzheu + */ + +#ifndef _ASM_S390_DIAG_H +#define _ASM_S390_DIAG_H + +#include +#include +#include +#include + +enum diag_stat_enum { + DIAG_STAT_X008, + DIAG_STAT_X00C, + DIAG_STAT_X010, + DIAG_STAT_X014, + DIAG_STAT_X044, + DIAG_STAT_X064, + DIAG_STAT_X08C, + DIAG_STAT_X09C, + DIAG_STAT_X0DC, + DIAG_STAT_X204, + DIAG_STAT_X210, + DIAG_STAT_X224, + DIAG_STAT_X250, + DIAG_STAT_X258, + DIAG_STAT_X26C, + DIAG_STAT_X288, + DIAG_STAT_X2C4, + DIAG_STAT_X2FC, + DIAG_STAT_X304, + DIAG_STAT_X308, + DIAG_STAT_X318, + DIAG_STAT_X320, + DIAG_STAT_X500, + NR_DIAG_STAT +}; + +void diag_stat_inc(enum diag_stat_enum nr); +void diag_stat_inc_norecursion(enum diag_stat_enum nr); + +/* + * Diagnose 10: Release page range + */ +static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) +{ + unsigned long start_addr, end_addr; + + start_addr = pfn_to_phys(start_pfn); + end_addr = pfn_to_phys(start_pfn + num_pfn - 1); + + diag_stat_inc(DIAG_STAT_X010); + asm volatile( + "0: diag %0,%1,0x10\n" + "1: nopr %%r7\n" + EX_TABLE(0b, 1b) + EX_TABLE(1b, 1b) + : : "a" (start_addr), "a" (end_addr)); +} + +/* + * Diagnose 14: Input spool file manipulation + */ +extern int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode); + +/* + * Diagnose 210: Get information about a virtual device + */ +struct diag210 { + u16 vrdcdvno; /* device number (input) */ + u16 vrdclen; /* data block length (input) */ + u8 vrdcvcla; /* virtual device class (output) */ + u8 vrdcvtyp; /* virtual device type (output) */ + u8 vrdcvsta; /* virtual device status (output) */ + u8 vrdcvfla; /* virtual device flags (output) */ + u8 vrdcrccl; /* real device class (output) */ + u8 vrdccrty; /* real device type (output) */ + u8 vrdccrmd; /* real device model (output) */ + u8 vrdccrft; /* real device feature (output) */ +} __packed __aligned(4); + +extern int diag210(struct diag210 *addr); + +struct diag8c { + u8 flags; + u8 num_partitions; + u16 width; + u16 height; + u8 data[]; +} __packed __aligned(4); + +extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno); + +/* bit is set in flags, when physical cpu info is included in diag 204 data */ +#define DIAG204_LPAR_PHYS_FLG 0x80 +#define DIAG204_LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */ +#define DIAG204_CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */ + +/* diag 204 subcodes */ +enum diag204_sc { + DIAG204_SUBC_STIB4 = 4, + DIAG204_SUBC_RSI = 5, + DIAG204_SUBC_STIB6 = 6, + DIAG204_SUBC_STIB7 = 7 +}; + +#define DIAG204_SUBCODE_MASK 0xffff + +/* The two available diag 204 data formats */ +enum diag204_format { + DIAG204_INFO_SIMPLE = 0, + DIAG204_INFO_EXT = 0x00010000 +}; + +enum diag204_cpu_flags { + DIAG204_CPU_ONLINE = 0x20, + DIAG204_CPU_CAPPED = 0x40, +}; + +struct diag204_info_blk_hdr { + __u8 npar; + __u8 flags; + __u16 tslice; + __u16 phys_cpus; + __u16 this_part; + __u64 curtod; +} __packed; + +struct diag204_x_info_blk_hdr { + __u8 npar; + __u8 flags; + __u16 tslice; + __u16 phys_cpus; + __u16 this_part; + __u64 curtod1; + __u64 curtod2; + char reserved[40]; +} __packed; + +struct diag204_part_hdr { + __u8 pn; + __u8 cpus; + char reserved[6]; + char part_name[DIAG204_LPAR_NAME_LEN]; +} __packed; + +struct diag204_x_part_hdr { + __u8 pn; + __u8 cpus; + __u8 rcpus; + __u8 pflag; + __u32 mlu; + char part_name[DIAG204_LPAR_NAME_LEN]; + char lpc_name[8]; + char os_name[8]; + __u64 online_cs; + __u64 online_es; + __u8 upid; + __u8 reserved:3; + __u8 mtid:5; + char reserved1[2]; + __u32 group_mlu; + char 
group_name[8]; + char hardware_group_name[8]; + char reserved2[24]; +} __packed; + +struct diag204_cpu_info { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + __u8 cflag; + __u16 weight; + __u64 acc_time; + __u64 lp_time; +} __packed; + +struct diag204_x_cpu_info { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + __u8 cflag; + __u16 weight; + __u64 acc_time; + __u64 lp_time; + __u16 min_weight; + __u16 cur_weight; + __u16 max_weight; + char reseved2[2]; + __u64 online_time; + __u64 wait_time; + __u32 pma_weight; + __u32 polar_weight; + __u32 cpu_type_cap; + __u32 group_cpu_type_cap; + char reserved3[32]; +} __packed; + +struct diag204_phys_hdr { + char reserved1[1]; + __u8 cpus; + char reserved2[6]; + char mgm_name[8]; +} __packed; + +struct diag204_x_phys_hdr { + char reserved1[1]; + __u8 cpus; + char reserved2[6]; + char mgm_name[8]; + char reserved3[80]; +} __packed; + +struct diag204_phys_cpu { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + char reserved2[3]; + __u64 mgm_time; + char reserved3[8]; +} __packed; + +struct diag204_x_phys_cpu { + __u16 cpu_addr; + char reserved1[2]; + __u8 ctidx; + char reserved2[1]; + __u16 weight; + __u64 mgm_time; + char reserved3[80]; +} __packed; + +struct diag204_x_part_block { + struct diag204_x_part_hdr hdr; + struct diag204_x_cpu_info cpus[]; +} __packed; + +struct diag204_x_phys_block { + struct diag204_x_phys_hdr hdr; + struct diag204_x_phys_cpu cpus[]; +} __packed; + +enum diag26c_sc { + DIAG26C_PORT_VNIC = 0x00000024, + DIAG26C_MAC_SERVICES = 0x00000030 +}; + +enum diag26c_version { + DIAG26C_VERSION2 = 0x00000002, /* z/VM 5.4.0 */ + DIAG26C_VERSION6_VM65918 = 0x00020006 /* z/VM 6.4.0 + VM65918 */ +}; + +#define DIAG26C_VNIC_INFO 0x0002 +struct diag26c_vnic_req { + u32 resp_buf_len; + u32 resp_version; + u16 req_format; + u16 vlan_id; + u64 sys_name; + u8 res[2]; + u16 devno; +} __packed __aligned(8); + +#define VNIC_INFO_PROT_L3 1 +#define VNIC_INFO_PROT_L2 2 +/* Note: this is the bare minimum, use it for uninitialized VNICs only. */ +struct diag26c_vnic_resp { + u32 version; + u32 entry_cnt; + /* VNIC info: */ + u32 next_entry; + u64 owner; + u16 devno; + u8 status; + u8 type; + u64 lan_owner; + u64 lan_name; + u64 port_name; + u8 port_type; + u8 ext_status:6; + u8 protocol:2; + u16 base_devno; + u32 port_num; + u32 ifindex; + u32 maxinfo; + u32 dev_count; + /* 3x device info: */ + u8 dev_info1[28]; + u8 dev_info2[28]; + u8 dev_info3[28]; +} __packed __aligned(8); + +#define DIAG26C_GET_MAC 0x0000 +struct diag26c_mac_req { + u32 resp_buf_len; + u32 resp_version; + u16 op_code; + u16 devno; + u8 res[4]; +}; + +struct diag26c_mac_resp { + u32 version; + u8 mac[ETH_ALEN]; + u8 res[2]; +} __aligned(8); + +#define CPNC_LINUX 0x4 +union diag318_info { + unsigned long val; + struct { + unsigned long cpnc : 8; + unsigned long cpvc : 56; + }; +}; + +int diag204(unsigned long subcode, unsigned long size, void *addr); +int diag224(void *ptr); +int diag26c(void *req, void *resp, enum diag26c_sc subcode); + +struct hypfs_diag0c_entry; + +/* + * This structure must contain only pointers/references into + * the AMODE31 text section. 
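+ *
+ * (Editor's note, an illustrative sketch that is not part of the original
+ * header: common code is expected to reach these helpers through the ops
+ * structure, e.g.
+ *
+ *	rc = diag_amode31_ops.diag210(__diag210_tmp_amode31);
+ *
+ * so that the called code is guaranteed to reside in the 31-bit
+ * addressable text section.)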
+ */
+struct diag_ops {
+	int (*diag210)(struct diag210 *addr);
+	int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
+	int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
+	int (*diag8c)(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+	void (*diag0c)(struct hypfs_diag0c_entry *entry);
+	void (*diag308_reset)(void);
+};
+
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
+
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
+int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+
+#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644
index 000000000..c18ed6091
--- /dev/null
+++ b/arch/s390/include/asm/dis.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+#include
+
+static inline int insn_length(unsigned char code)
+{
+	return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+struct pt_regs;
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+	return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
new file mode 100644
index 000000000..7fe3e3195
--- /dev/null
+++ b/arch/s390/include/asm/dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_DMA_H
+#define _ASM_S390_DMA_H
+
+#include
+
+/*
+ * MAX_DMA_ADDRESS is ambiguous because on s390 it's completely unrelated
+ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
+ * by the 31 bit heritage.
+ */
+#define MAX_DMA_ADDRESS __va(0x80000000)
+
+#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
new file mode 100644
index 000000000..4f21ae561
--- /dev/null
+++ b/arch/s390/include/asm/dwarf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_DWARF_H
+#define _ASM_S390_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_RESTORE .cfi_restore
+
+#ifdef CONFIG_AS_CFI_VAL_OFFSET
+#define CFI_VAL_OFFSET .cfi_val_offset
+#else
+#define CFI_VAL_OFFSET #
+#endif
+
+#ifndef BUILD_VDSO
+	/*
+	 * Emit CFI data in .debug_frame sections and not in .eh_frame
+	 * sections. The .eh_frame CFI would only provide runtime unwind
+	 * information, which the kernel does not use. Hence, vmlinux.lds.S
+	 * can discard the .eh_frame sections.
+	 */
+	.cfi_sections .debug_frame
+#else
+	/*
+	 * For the vDSO, emit CFI data in both the .eh_frame and
+	 * .debug_frame sections.
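+	 * Userspace unwinders typically consume .eh_frame at run time,
+	 * while debuggers read .debug_frame, so the vDSO keeps both.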
+ */ + .cfi_sections .eh_frame, .debug_frame +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_DWARF_H */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h new file mode 100644 index 000000000..06f795855 --- /dev/null +++ b/arch/s390/include/asm/eadm.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_EADM_H +#define _ASM_S390_EADM_H + +#include +#include +#include + +struct arqb { + u64 data; + u16 fmt:4; + u16:12; + u16 cmd_code; + u16:16; + u16 msb_count; + u32 reserved[12]; +} __packed; + +#define ARQB_CMD_MOVE 1 + +struct arsb { + u16 fmt:4; + u32:28; + u8 ef; + u8:8; + u8 ecbi; + u8:8; + u8 fvf; + u16:16; + u8 eqc; + u32:32; + u64 fail_msb; + u64 fail_aidaw; + u64 fail_ms; + u64 fail_scm; + u32 reserved[4]; +} __packed; + +#define EQC_WR_PROHIBIT 22 + +struct msb { + u8 fmt:4; + u8 oc:4; + u8 flags; + u16:12; + u16 bs:4; + u32 blk_count; + u64 data_addr; + u64 scm_addr; + u64:64; +} __packed; + +struct aidaw { + u8 flags; + u32 :24; + u32 :32; + u64 data_addr; +} __packed; + +#define MSB_OC_CLEAR 0 +#define MSB_OC_READ 1 +#define MSB_OC_WRITE 2 +#define MSB_OC_RELEASE 3 + +#define MSB_FLAG_BNM 0x80 +#define MSB_FLAG_IDA 0x40 + +#define MSB_BS_4K 0 +#define MSB_BS_1M 1 + +#define AOB_NR_MSB 124 + +struct aob { + struct arqb request; + struct arsb response; + struct msb msb[AOB_NR_MSB]; +} __packed __aligned(PAGE_SIZE); + +struct aob_rq_header { + struct scm_device *scmdev; + char data[]; +}; + +struct scm_device { + u64 address; + u64 size; + unsigned int nr_max_block; + struct device dev; + struct { + unsigned int persistence:4; + unsigned int oper_state:4; + unsigned int data_state:4; + unsigned int rank:4; + unsigned int release:1; + unsigned int res_id:8; + } __packed attrs; +}; + +#define OP_STATE_GOOD 1 +#define OP_STATE_TEMP_ERR 2 +#define OP_STATE_PERM_ERR 3 + +enum scm_event {SCM_CHANGE, SCM_AVAIL}; + +struct scm_driver { + struct device_driver drv; + int (*probe) (struct scm_device *scmdev); + void (*remove) (struct scm_device *scmdev); + void (*notify) (struct scm_device *scmdev, enum scm_event event); + void (*handler) (struct scm_device *scmdev, void *data, + blk_status_t error); +}; + +int scm_driver_register(struct scm_driver *scmdrv); +void scm_driver_unregister(struct scm_driver *scmdrv); + +int eadm_start_aob(struct aob *aob); +void scm_irq_handler(struct aob *aob, blk_status_t error); + +#endif /* _ASM_S390_EADM_H */ diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h new file mode 100644 index 000000000..efb50fc68 --- /dev/null +++ b/arch/s390/include/asm/ebcdic.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines. + * + * S390 version + * Copyright IBM Corp. 
1999 + * Author(s): Martin Schwidefsky + */ + +#ifndef _EBCDIC_H +#define _EBCDIC_H + +#include + +extern __u8 _ascebc_500[256]; /* ASCII -> EBCDIC 500 conversion table */ +extern __u8 _ebcasc_500[256]; /* EBCDIC 500 -> ASCII conversion table */ +extern __u8 _ascebc[256]; /* ASCII -> EBCDIC conversion table */ +extern __u8 _ebcasc[256]; /* EBCDIC -> ASCII conversion table */ +extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */ +extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */ + +static inline void +codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr) +{ + if (nr-- <= 0) + return; + asm volatile( + " bras 1,1f\n" + " tr 0(1,%0),0(%2)\n" + "0: tr 0(256,%0),0(%2)\n" + " la %0,256(%0)\n" + "1: ahi %1,-256\n" + " jnm 0b\n" + " ex %1,0(1)" + : "+&a" (addr), "+&a" (nr) + : "a" (codepage) : "cc", "memory", "1"); +} + +#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr) +#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr) +#define ASCEBC_500(addr,nr) codepage_convert(_ascebc_500, addr, nr) +#define EBCASC_500(addr,nr) codepage_convert(_ebcasc_500, addr, nr) +#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr) +#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr) + +#endif + diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h new file mode 100644 index 000000000..70a30ae25 --- /dev/null +++ b/arch/s390/include/asm/elf.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * + * Derived from "include/asm-i386/elf.h" + */ + +#ifndef __ASMS390_ELF_H +#define __ASMS390_ELF_H + +/* s390 relocations defined by the ABIs */ +#define R_390_NONE 0 /* No reloc. */ +#define R_390_8 1 /* Direct 8 bit. */ +#define R_390_12 2 /* Direct 12 bit. */ +#define R_390_16 3 /* Direct 16 bit. */ +#define R_390_32 4 /* Direct 32 bit. */ +#define R_390_PC32 5 /* PC relative 32 bit. */ +#define R_390_GOT12 6 /* 12 bit GOT offset. */ +#define R_390_GOT32 7 /* 32 bit GOT offset. */ +#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ +#define R_390_COPY 9 /* Copy symbol at runtime. */ +#define R_390_GLOB_DAT 10 /* Create GOT entry. */ +#define R_390_JMP_SLOT 11 /* Create PLT entry. */ +#define R_390_RELATIVE 12 /* Adjust by program base. */ +#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ +#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */ +#define R_390_GOT16 15 /* 16 bit GOT offset. */ +#define R_390_PC16 16 /* PC relative 16 bit. */ +#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ +#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ +#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ +#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ +#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ +#define R_390_64 22 /* Direct 64 bit. */ +#define R_390_PC64 23 /* PC relative 64 bit. */ +#define R_390_GOT64 24 /* 64 bit GOT offset. */ +#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ +#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ +#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ +#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ +#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ +#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ +#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ +#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ +#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. 
*/
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS block. */
+#define R_390_20 57 /* Direct 20 bit. */
+#define R_390_GOT20 58 /* 20 bit GOT offset. */
+#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */
+#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS block offset. */
+/* Keep this the last entry. */
+#define R_390_NUM 61
+
+enum {
+	HWCAP_NR_ESAN3 = 0,
+	HWCAP_NR_ZARCH = 1,
+	HWCAP_NR_STFLE = 2,
+	HWCAP_NR_MSA = 3,
+	HWCAP_NR_LDISP = 4,
+	HWCAP_NR_EIMM = 5,
+	HWCAP_NR_DFP = 6,
+	HWCAP_NR_HPAGE = 7,
+	HWCAP_NR_ETF3EH = 8,
+	HWCAP_NR_HIGH_GPRS = 9,
+	HWCAP_NR_TE = 10,
+	HWCAP_NR_VXRS = 11,
+	HWCAP_NR_VXRS_BCD = 12,
+	HWCAP_NR_VXRS_EXT = 13,
+	HWCAP_NR_GS = 14,
+	HWCAP_NR_VXRS_EXT2 = 15,
+	HWCAP_NR_VXRS_PDE = 16,
+	HWCAP_NR_SORT = 17,
+	HWCAP_NR_DFLT = 18,
+	HWCAP_NR_VXRS_PDE2 = 19,
+	HWCAP_NR_NNPA = 20,
+	HWCAP_NR_PCI_MIO = 21,
+	HWCAP_NR_SIE = 22,
+	HWCAP_NR_MAX
+};
+
+/* Bits present in AT_HWCAP.
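+ * An illustrative sketch only (the real setup code lives elsewhere in
+ * the arch code, and reading facility 129 as the vector facility is an
+ * assumption made here): a bit is typically derived from a facility
+ * test, e.g.
+ *
+ *	if (test_facility(129))
+ *		elf_hwcap |= HWCAP_VXRS;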
*/ +#define HWCAP_ESAN3 BIT(HWCAP_NR_ESAN3) +#define HWCAP_ZARCH BIT(HWCAP_NR_ZARCH) +#define HWCAP_STFLE BIT(HWCAP_NR_STFLE) +#define HWCAP_MSA BIT(HWCAP_NR_MSA) +#define HWCAP_LDISP BIT(HWCAP_NR_LDISP) +#define HWCAP_EIMM BIT(HWCAP_NR_EIMM) +#define HWCAP_DFP BIT(HWCAP_NR_DFP) +#define HWCAP_HPAGE BIT(HWCAP_NR_HPAGE) +#define HWCAP_ETF3EH BIT(HWCAP_NR_ETF3EH) +#define HWCAP_HIGH_GPRS BIT(HWCAP_NR_HIGH_GPRS) +#define HWCAP_TE BIT(HWCAP_NR_TE) +#define HWCAP_VXRS BIT(HWCAP_NR_VXRS) +#define HWCAP_VXRS_BCD BIT(HWCAP_NR_VXRS_BCD) +#define HWCAP_VXRS_EXT BIT(HWCAP_NR_VXRS_EXT) +#define HWCAP_GS BIT(HWCAP_NR_GS) +#define HWCAP_VXRS_EXT2 BIT(HWCAP_NR_VXRS_EXT2) +#define HWCAP_VXRS_PDE BIT(HWCAP_NR_VXRS_PDE) +#define HWCAP_SORT BIT(HWCAP_NR_SORT) +#define HWCAP_DFLT BIT(HWCAP_NR_DFLT) +#define HWCAP_VXRS_PDE2 BIT(HWCAP_NR_VXRS_PDE2) +#define HWCAP_NNPA BIT(HWCAP_NR_NNPA) +#define HWCAP_PCI_MIO BIT(HWCAP_NR_PCI_MIO) +#define HWCAP_SIE BIT(HWCAP_NR_SIE) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2MSB +#define ELF_ARCH EM_S390 + +/* s390 specific phdr types */ +#define PT_S390_PGSTE 0x70000000 + +/* + * ELF register definitions.. + */ + +#include + +#include +#include +#include + +typedef s390_fp_regs elf_fpregset_t; +typedef s390_regs elf_gregset_t; + +typedef s390_fp_regs compat_elf_fpregset_t; +typedef s390_compat_regs compat_elf_gregset_t; + +#include /* for task_struct */ +#include + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) \ + (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ + && (x)->e_ident[EI_CLASS] == ELF_CLASS) +#define compat_elf_check_arch(x) \ + (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ + && (x)->e_ident[EI_CLASS] == ELF_CLASS) +#define compat_start_thread start_thread31 + +struct arch_elf_state { + int rc; +}; + +#define INIT_ARCH_ELF_STATE { .rc = 0 } + +#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0) +#ifdef CONFIG_PGSTE +#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ +({ \ + struct arch_elf_state *_state = state; \ + if ((phdr)->p_type == PT_S390_PGSTE && \ + !page_table_allocate_pgste && \ + !test_thread_flag(TIF_PGSTE) && \ + !current->mm->context.alloc_pgste) { \ + set_thread_flag(TIF_PGSTE); \ + set_pt_regs_flag(task_pt_regs(current), \ + PIF_EXECVE_PGSTE_RESTART); \ + _state->rc = -EAGAIN; \ + } \ + _state->rc; \ +}) +#else +#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ +({ \ + (state)->rc; \ +}) +#endif + +/* For SVR4/S390 the function pointer to be registered with `atexit` is + passed in R14. */ +#define ELF_PLAT_INIT(_r, load_addr) \ + do { \ + _r->gprs[14] = 0; \ + } while (0) + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 64-bit + tasks are aligned to 4GB. */ +#define ELF_ET_DYN_BASE (is_compat_task() ? \ + (STACK_TOP / 3 * 2) : \ + (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) + +/* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
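+
+   An illustrative userspace sketch (getauxval() comes from the glibc
+   header sys/auxv.h; the userspace constant may be spelled
+   HWCAP_S390_VXRS there, with the same value as HWCAP_VXRS here):
+
+	#include <sys/auxv.h>
+	int has_vx = (getauxval(AT_HWCAP) & HWCAP_VXRS) != 0;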
*/ + +extern unsigned long elf_hwcap; +#define ELF_HWCAP (elf_hwcap) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM_SIZE 8 +extern char elf_platform[]; +#define ELF_PLATFORM (elf_platform) + +#ifndef CONFIG_COMPAT +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ + current->thread.sys_call_table = sys_call_table; \ +} while (0) +#else /* CONFIG_COMPAT */ +#define SET_PERSONALITY(ex) \ +do { \ + if (personality(current->personality) != PER_LINUX32) \ + set_personality(PER_LINUX | \ + (current->personality & ~PER_MASK)); \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ + set_thread_flag(TIF_31BIT); \ + current->thread.sys_call_table = \ + sys_call_table_emu; \ + } else { \ + clear_thread_flag(TIF_31BIT); \ + current->thread.sys_call_table = \ + sys_call_table; \ + } \ +} while (0) +#endif /* CONFIG_COMPAT */ + +/* + * Cache aliasing on the latest machines calls for a mapping granularity + * of 512KB for the anonymous mapping base. For 64-bit processes use a + * 512KB alignment and a randomization of up to 1GB. For 31-bit processes + * the virtual address space is limited, use no alignment and limit the + * randomization to 8MB. + * For the additional randomization of the program break use 32MB for + * 64-bit and 8MB for 31-bit. + */ +#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) +#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) +#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) +#define STACK_RND_MASK MMAP_RND_MASK + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso_base); \ +} while (0) + +struct linux_binprm; + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +int arch_setup_additional_pages(struct linux_binprm *, int); + +#endif diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h new file mode 100644 index 000000000..fdd319a62 --- /dev/null +++ b/arch/s390/include/asm/entry-common.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_S390_ENTRY_COMMON_H +#define ARCH_S390_ENTRY_COMMON_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) + +void do_per_trap(struct pt_regs *regs); + +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) +{ + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + debug_user_asce(0); + + pai_kernel_enter(regs); +} + +#define arch_enter_from_user_mode arch_enter_from_user_mode + +static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work) +{ + if (ti_work & _TIF_PER_TRAP) { + clear_thread_flag(TIF_PER_TRAP); + do_per_trap(regs); + } + + if (ti_work & _TIF_GUARDED_STORAGE) + gs_load_bc_cb(regs); +} + +#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work + +static __always_inline void arch_exit_to_user_mode(void) +{ + if (test_cpu_flag(CIF_FPU)) + __load_fpu_regs(); + + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + debug_user_asce(1); + + pai_kernel_exit(current_pt_regs()); +} + +#define arch_exit_to_user_mode arch_exit_to_user_mode + +static inline void 
arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + choose_random_kstack_offset(get_tod_clock_fast() & 0xff); +} + +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare + +#endif diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h new file mode 100644 index 000000000..641bfbec9 --- /dev/null +++ b/arch/s390/include/asm/exec.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_EXEC_H +#define __ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* __ASM_EXEC_H */ diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h new file mode 100644 index 000000000..af6ba5274 --- /dev/null +++ b/arch/s390/include/asm/extable.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_EXTABLE_H +#define __S390_EXTABLE_H + +#include +#include + +/* + * The exception table consists of three addresses: + * + * - Address of an instruction that is allowed to fault. + * - Address at which the program should continue. + * - Optional address of handler that takes pt_regs * argument and runs in + * interrupt context. + * + * No registers are modified, so it is entirely up to the continuation code + * to figure out what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + int insn, fixup; + short type, data; +}; + +extern struct exception_table_entry *__start_amode31_ex_table; +extern struct exception_table_entry *__stop_amode31_ex_table; + +const struct exception_table_entry *s390_search_extables(unsigned long addr); + +static inline unsigned long extable_fixup(const struct exception_table_entry *x) +{ + return (unsigned long)&x->fixup + x->fixup; +} + +#define ARCH_HAS_RELATIVE_EXTABLE + +static inline void swap_ex_entry_fixup(struct exception_table_entry *a, + struct exception_table_entry *b, + struct exception_table_entry tmp, + int delta) +{ + a->fixup = b->fixup + delta; + b->fixup = tmp.fixup - delta; + a->type = b->type; + b->type = tmp.type; + a->data = b->data; + b->data = tmp.data; +} +#define swap_ex_entry_fixup swap_ex_entry_fixup + +#ifdef CONFIG_BPF_JIT + +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); + +#else /* !CONFIG_BPF_JIT */ + +static inline bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs) +{ + return false; +} + +#endif /* CONFIG_BPF_JIT */ + +bool fixup_exception(struct pt_regs *regs); + +#endif diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h new file mode 100644 index 000000000..568fd81bb --- /dev/null +++ b/arch/s390/include/asm/extmem.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * definitions for external memory segment support + * Copyright IBM Corp. 
2003 + */ + +#ifndef _ASM_S390X_DCSS_H +#define _ASM_S390X_DCSS_H +#ifndef __ASSEMBLY__ + +/* possible values for segment type as returned by segment_info */ +#define SEG_TYPE_SW 0 +#define SEG_TYPE_EW 1 +#define SEG_TYPE_SR 2 +#define SEG_TYPE_ER 3 +#define SEG_TYPE_SN 4 +#define SEG_TYPE_EN 5 +#define SEG_TYPE_SC 6 +#define SEG_TYPE_EWEN 7 + +#define SEGMENT_SHARED 0 +#define SEGMENT_EXCLUSIVE 1 + +int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length); +void segment_unload(char *name); +void segment_save(char *name); +int segment_type (char* name); +int segment_modify_shared (char *name, int do_nonshared); +void segment_warning(int rc, char *seg_name); + +#endif +#endif diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h new file mode 100644 index 000000000..94b691902 --- /dev/null +++ b/arch/s390/include/asm/facility.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_FACILITY_H +#define __ASM_FACILITY_H + +#include + +#include +#include +#include +#include + +#include + +#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) + +extern u64 stfle_fac_list[16]; +extern u64 alt_stfle_fac_list[16]; + +static inline void __set_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] |= 0x80 >> (nr & 7); +} + +static inline void __clear_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); +} + +static inline int __test_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr; + + if (nr >= MAX_FACILITY_BIT) + return 0; + ptr = (unsigned char *) facilities + (nr >> 3); + return (*ptr & (0x80 >> (nr & 7))) != 0; +} + +/* + * The test_facility function uses the bit ordering where the MSB is bit 0. + * That makes it easier to query facility bits with the bit number as + * documented in the Principles of Operation. 
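+ *
+ * Concretely, facility bit nr lives in byte nr / 8 under the mask
+ * 0x80 >> (nr & 7), which is exactly what __test_facility() computes.
+ * A typical (illustrative) use, with the bit number taken straight
+ * from the documentation:
+ *
+ *	if (test_facility(65))
+ *		...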
+ */ +static inline int test_facility(unsigned long nr) +{ + unsigned long facilities_als[] = { FACILITIES_ALS }; + + if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) { + if (__test_facility(nr, &facilities_als)) + return 1; + } + return __test_facility(nr, &stfle_fac_list); +} + +static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) +{ + unsigned long reg0 = size - 1; + + asm volatile( + " lgr 0,%[reg0]\n" + " .insn s,0xb2b00000,%[list]\n" /* stfle */ + " lgr %[reg0],0\n" + : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list) + : + : "memory", "cc", "0"); + return reg0; +} + +/** + * stfle - Store facility list extended + * @stfle_fac_list: array where facility list can be stored + * @size: size of passed in array in double words + */ +static inline void __stfle(u64 *stfle_fac_list, int size) +{ + unsigned long nr; + u32 stfl_fac_list; + + asm volatile( + " stfl 0(0)\n" + : "=m" (S390_lowcore.stfl_fac_list)); + stfl_fac_list = S390_lowcore.stfl_fac_list; + memcpy(stfle_fac_list, &stfl_fac_list, 4); + nr = 4; /* bytes stored by stfl */ + if (stfl_fac_list & 0x01000000) { + /* More facility bits available with stfle */ + nr = __stfle_asm(stfle_fac_list, size); + nr = min_t(unsigned long, (nr + 1) * 8, size * 8); + } + memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); +} + +static inline void stfle(u64 *stfle_fac_list, int size) +{ + preempt_disable(); + __stfle(stfle_fac_list, size); + preempt_enable(); +} + +#endif /* __ASM_FACILITY_H */ diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h new file mode 100644 index 000000000..29784b4b4 --- /dev/null +++ b/arch/s390/include/asm/fcx.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions for assembling fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter + */ + +#ifndef _ASM_S390_FCX_H +#define _ASM_S390_FCX_H + +#include + +#define TCW_FORMAT_DEFAULT 0 +#define TCW_TIDAW_FORMAT_DEFAULT 0 +#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5)) +#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6)) +#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7)) +#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9) +#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3) + +/** + * struct tcw - Transport Control Word (TCW) + * @format: TCW format + * @flags: TCW flags + * @tccbl: Transport-Command-Control-Block Length + * @r: Read Operations + * @w: Write Operations + * @output: Output-Data Address + * @input: Input-Data Address + * @tsb: Transport-Status-Block Address + * @tccb: Transport-Command-Control-Block Address + * @output_count: Output Count + * @input_count: Input Count + * @intrg: Interrogate TCW Address + */ +struct tcw { + u32 format:2; + u32 :6; + u32 flags:24; + u32 :8; + u32 tccbl:6; + u32 r:1; + u32 w:1; + u32 :16; + u64 output; + u64 input; + u64 tsb; + u64 tccb; + u32 output_count; + u32 input_count; + u32 :32; + u32 :32; + u32 :32; + u32 intrg; +} __attribute__ ((packed, aligned(64))); + +#define TIDAW_FLAGS_LAST (1 << (7 - 0)) +#define TIDAW_FLAGS_SKIP (1 << (7 - 1)) +#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2)) +#define TIDAW_FLAGS_TTIC (1 << (7 - 3)) +#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4)) + +/** + * struct tidaw - Transport-Indirect-Addressing Word (TIDAW) + * @flags: TIDAW flags. 
Can be an arithmetic OR of the following constants:
+ *	%TIDAW_FLAGS_LAST, %TIDAW_FLAGS_SKIP, %TIDAW_FLAGS_DATA_INT,
+ *	%TIDAW_FLAGS_TTIC, %TIDAW_FLAGS_INSERT_CBC
+ * @count: Count
+ * @addr: Address
+ */
+struct tidaw {
+	u32 flags:8;
+	u32 :24;
+	u32 count;
+	u64 addr;
+} __attribute__ ((packed, aligned(16)));
+
+/**
+ * struct tsa_iostat - I/O-Status Transport-Status Area (IO-Stat TSA)
+ * @dev_time: Device Time
+ * @def_time: Defer Time
+ * @queue_time: Queue Time
+ * @dev_busy_time: Device-Busy Time
+ * @dev_act_time: Device-Active-Only Time
+ * @sense: Sense Data (if present)
+ */
+struct tsa_iostat {
+	u32 dev_time;
+	u32 def_time;
+	u32 queue_time;
+	u32 dev_busy_time;
+	u32 dev_act_time;
+	u8 sense[32];
+} __attribute__ ((packed));
+
+/**
+ * struct tsa_ddpc - Device-Detected-Program-Check Transport-Status Area (DDPC TSA)
+ * @rc: Reason Code
+ * @rcq: Reason Code Qualifier
+ * @sense: Sense Data (if present)
+ */
+struct tsa_ddpc {
+	u32 :24;
+	u32 rc:8;
+	u8 rcq[16];
+	u8 sense[32];
+} __attribute__ ((packed));
+
+#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
+
+/**
+ * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
+ * @format: Format
+ * @flags: Flags. Can be an arithmetic OR of the following constants:
+ *	%TSA_INTRG_FLAGS_CU_STATE_VALID, %TSA_INTRG_FLAGS_DEV_STATE_VALID,
+ *	%TSA_INTRG_FLAGS_OP_STATE_VALID
+ * @cu_state: Control-Unit State
+ * @dev_state: Device State
+ * @op_state: Operation State
+ * @sd_info: State-Dependent Information
+ * @dl_id: Device-Level Identifier
+ * @dd_data: Device-Dependent Data
+ */
+struct tsa_intrg {
+	u32 format:8;
+	u32 flags:8;
+	u32 cu_state:8;
+	u32 dev_state:8;
+	u32 op_state:8;
+	u32 :24;
+	u8 sd_info[12];
+	u32 dl_id;
+	u8 dd_data[28];
+} __attribute__ ((packed));
+
+#define TSB_FORMAT_NONE 0
+#define TSB_FORMAT_IOSTAT 1
+#define TSB_FORMAT_DDPC 2
+#define TSB_FORMAT_INTRG 3
+
+#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
+#define TSB_FLAGS_FORMAT(x) ((x) & 7)
+#define TSB_FORMAT(t) ((t)->flags & 7)
+
+/**
+ * struct tsb - Transport-Status Block (TSB)
+ * @length: Length
+ * @flags: Flags. Can be an arithmetic OR of the following constants:
+ *	%TSB_FLAGS_DCW_OFFSET_VALID, %TSB_FLAGS_COUNT_VALID, %TSB_FLAGS_CACHE_MISS,
+ *	%TSB_FLAGS_TIME_VALID
+ * @dcw_offset: DCW Offset
+ * @count: Count
+ * @tsa: Transport-Status-Area
+ */
+struct tsb {
+	u32 length:8;
+	u32 flags:8;
+	u32 dcw_offset:16;
+	u32 count;
+	u32 :32;
+	union {
+		struct tsa_iostat iostat;
+		struct tsa_ddpc ddpc;
+		struct tsa_intrg intrg;
+	} __attribute__ ((packed)) tsa;
+} __attribute__ ((packed, aligned(8)));
+
+#define DCW_INTRG_FORMAT_DEFAULT 0
+
+#define DCW_INTRG_RC_UNSPECIFIED 0
+#define DCW_INTRG_RC_TIMEOUT 1
+
+#define DCW_INTRG_RCQ_UNSPECIFIED 0
+#define DCW_INTRG_RCQ_PRIMARY 1
+#define DCW_INTRG_RCQ_SECONDARY 2
+
+#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
+
+/**
+ * struct dcw_intrg_data - Interrogate DCW data
+ * @format: Format. Should be %DCW_INTRG_FORMAT_DEFAULT
+ * @rc: Reason Code. Can be one of %DCW_INTRG_RC_UNSPECIFIED,
+ *	%DCW_INTRG_RC_TIMEOUT
+ * @rcq: Reason Code Qualifier. Can be one of %DCW_INTRG_RCQ_UNSPECIFIED,
+ *	%DCW_INTRG_RCQ_PRIMARY, %DCW_INTRG_RCQ_SECONDARY
+ * @lpm: Logical-Path Mask
+ * @pam: Path-Available Mask
+ * @pim: Path-Installed Mask
+ * @timeout: Timeout
+ * @flags: Flags. Can be an arithmetic OR of %DCW_INTRG_FLAGS_MPM,
+ *	%DCW_INTRG_FLAGS_PPR, %DCW_INTRG_FLAGS_CRIT
+ * @time: Time
+ * @prog_id: Program Identifier
+ * @prog_data: Program-Dependent Data
+ */
+struct dcw_intrg_data {
+	u32 format:8;
+	u32 rc:8;
+	u32 rcq:8;
+	u32 lpm:8;
+	u32 pam:8;
+	u32 pim:8;
+	u32 timeout:16;
+	u32 flags:8;
+	u32 :24;
+	u32 :32;
+	u64 time;
+	u64 prog_id;
+	u8 prog_data[];
+} __attribute__ ((packed));
+
+#define DCW_FLAGS_CC (1 << (7 - 1))
+
+#define DCW_CMD_WRITE 0x01
+#define DCW_CMD_READ 0x02
+#define DCW_CMD_CONTROL 0x03
+#define DCW_CMD_SENSE 0x04
+#define DCW_CMD_SENSE_ID 0xe4
+#define DCW_CMD_INTRG 0x40
+
+/**
+ * struct dcw - Device-Command Word (DCW)
+ * @cmd: Command Code. Can be one of %DCW_CMD_WRITE, %DCW_CMD_READ,
+ *	%DCW_CMD_CONTROL, %DCW_CMD_SENSE, %DCW_CMD_SENSE_ID, %DCW_CMD_INTRG
+ * @flags: Flags. Can be an arithmetic OR of %DCW_FLAGS_CC
+ * @cd_count: Control-Data Count
+ * @count: Count
+ * @cd: Control Data
+ */
+struct dcw {
+	u32 cmd:8;
+	u32 flags:8;
+	u32 :8;
+	u32 cd_count:8;
+	u32 count;
+	u8 cd[];
+} __attribute__ ((packed));
+
+#define TCCB_FORMAT_DEFAULT 0x7f
+#define TCCB_MAX_DCW 30
+#define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
+		       TCCB_MAX_DCW * sizeof(struct dcw) + \
+		       sizeof(struct tccb_tcat))
+#define TCCB_SAC_DEFAULT 0x1ffe
+#define TCCB_SAC_INTRG 0x1fff
+
+/**
+ * struct tccb_tcah - Transport-Command-Area Header (TCAH)
+ * @format: Format. Should be %TCCB_FORMAT_DEFAULT
+ * @tcal: Transport-Command-Area Length
+ * @sac: Service-Action Code. Can be one of %TCCB_SAC_DEFAULT, %TCCB_SAC_INTRG
+ * @prio: Priority
+ */
+struct tccb_tcah {
+	u32 format:8;
+	u32 :24;
+	u32 :24;
+	u32 tcal:8;
+	u32 sac:16;
+	u32 :8;
+	u32 prio:8;
+	u32 :32;
+} __attribute__ ((packed));
+
+/**
+ * struct tccb_tcat - Transport-Command-Area Trailer (TCAT)
+ * @count: Transport Count
+ */
+struct tccb_tcat {
+	u32 :32;
+	u32 count;
+} __attribute__ ((packed));
+
+/**
+ * struct tccb - (partial) Transport-Command-Control Block (TCCB)
+ * @tcah: TCAH
+ * @tca: Transport-Command Area
+ */
+struct tccb {
+	struct tccb_tcah tcah;
+	u8 tca[];
+} __attribute__ ((packed, aligned(8)));
+
+struct tcw *tcw_get_intrg(struct tcw *tcw);
+void *tcw_get_data(struct tcw *tcw);
+struct tccb *tcw_get_tccb(struct tcw *tcw);
+struct tsb *tcw_get_tsb(struct tcw *tcw);
+
+void tcw_init(struct tcw *tcw, int r, int w);
+void tcw_finalize(struct tcw *tcw, int num_tidaws);
+
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw);
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal);
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb);
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb);
+
+void tccb_init(struct tccb *tccb, size_t tccb_size, u32 sac);
+void tsb_init(struct tsb *tsb);
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+			 void *cd, u8 cd_count, u32 count);
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+			    void *addr, u32 count);
+
+#endif /* _ASM_S390_FCX_H */
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
new file mode 100644
index 000000000..9acf48e53
--- /dev/null
+++ b/arch/s390/include/asm/fpu/api.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * In-kernel FPU support functions
+ *
+ * Consider these guidelines before using in-kernel FPU functions:
+ *
+ * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
+ *    use of floating-point or vector registers and instructions.
+ *
+ * 2. For kernel_fpu_begin(), specify the vector register range you want to
+ *    use with the KERNEL_VXR_* constants. Consider these usage guidelines:
+ *
+ *    a) If your function typically runs in process context, use the lower
+ *	 half of the vector registers, for example, specify KERNEL_VXR_LOW.
+ *    b) If your function typically runs in soft-irq or hard-irq context,
+ *	 prefer using the upper half of the vector registers, for example,
+ *	 specify KERNEL_VXR_HIGH.
+ *
+ *    If you adhere to these guidelines, an interrupted process context
+ *    does not need to save and restore vector registers, because the
+ *    register ranges are disjoint.
+ *
+ *    Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
+ *    include logic to save and restore up to 16 vector registers at once.
+ *
+ * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
+ *    struct kernel_fpu states. Vector registers that are in use by outer
+ *    levels are saved and restored. You can minimize the save and restore
+ *    effort by choosing disjoint vector register ranges.
+ *
+ * 4. To use vector floating-point instructions, specify the KERNEL_FPC
+ *    flag to save and restore floating-point controls in addition to any
+ *    vector register range.
+ *
+ * 5. To use floating-point registers and instructions only, specify the
+ *    KERNEL_FPR flag. This flag triggers a save and restore of vector
+ *    registers V0 to V15 and floating-point controls.
+ *
+ * Copyright IBM Corp.
2015 + * Author(s): Hendrik Brueckner + */ + +#ifndef _ASM_S390_FPU_API_H +#define _ASM_S390_FPU_API_H + +#include +#include + +void save_fpu_regs(void); +void load_fpu_regs(void); +void __load_fpu_regs(void); + +static inline int test_fp_ctl(u32 fpc) +{ + u32 orig_fpc; + int rc; + + asm volatile( + " efpc %1\n" + " sfpc %2\n" + "0: sfpc %1\n" + " la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc), "=&d" (orig_fpc) + : "d" (fpc), "0" (-EINVAL)); + return rc; +} + +#define KERNEL_FPC 1 +#define KERNEL_VXR_V0V7 2 +#define KERNEL_VXR_V8V15 4 +#define KERNEL_VXR_V16V23 8 +#define KERNEL_VXR_V24V31 16 + +#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15) +#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23) +#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31) + +#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH) +#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW) + +struct kernel_fpu; + +/* + * Note the functions below must be called with preemption disabled. + * Do not enable preemption before calling __kernel_fpu_end() to prevent + * an corruption of an existing kernel FPU state. + * + * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions. + */ +void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags); +void __kernel_fpu_end(struct kernel_fpu *state, u32 flags); + + +static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags) +{ + preempt_disable(); + state->mask = S390_lowcore.fpu_flags; + if (!test_cpu_flag(CIF_FPU)) + /* Save user space FPU state and register contents */ + save_fpu_regs(); + else if (state->mask & flags) + /* Save FPU/vector register in-use by the kernel */ + __kernel_fpu_begin(state, flags); + S390_lowcore.fpu_flags |= flags; +} + +static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags) +{ + S390_lowcore.fpu_flags = state->mask; + if (state->mask & flags) + /* Restore FPU/vector register in-use by the kernel */ + __kernel_fpu_end(state, flags); + preempt_enable(); +} + +#endif /* _ASM_S390_FPU_API_H */ diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h new file mode 100644 index 000000000..bbdadb1c9 --- /dev/null +++ b/arch/s390/include/asm/fpu/internal.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * FPU state and register content conversion primitives + * + * Copyright IBM Corp. 
2015 + * Author(s): Hendrik Brueckner + */ + +#ifndef _ASM_S390_FPU_INTERNAL_H +#define _ASM_S390_FPU_INTERNAL_H + +#include +#include +#include + +static inline void save_vx_regs(__vector128 *vxrs) +{ + asm volatile( + " la 1,%0\n" + " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ + " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ + : "=Q" (*(struct vx_array *) vxrs) : : "1"); +} + +static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + fprs[i].ui = vxrs[i].high; +} + +static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + vxrs[i].high = fprs[i].ui; +} + +static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + fpregs->pad = 0; + fpregs->fpc = fpu->fpc; + if (MACHINE_HAS_VX) + convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); + else + memcpy((freg_t *)&fpregs->fprs, fpu->fprs, + sizeof(fpregs->fprs)); +} + +static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + fpu->fpc = fpregs->fpc; + if (MACHINE_HAS_VX) + convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); + else + memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, + sizeof(fpregs->fprs)); +} + +#endif /* _ASM_S390_FPU_INTERNAL_H */ diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h new file mode 100644 index 000000000..d889e9436 --- /dev/null +++ b/arch/s390/include/asm/fpu/types.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * FPU data structures + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner + */ + +#ifndef _ASM_S390_FPU_TYPES_H +#define _ASM_S390_FPU_TYPES_H + +#include + +struct fpu { + __u32 fpc; /* Floating-point control */ + void *regs; /* Pointer to the current save area */ + union { + /* Floating-point register save area */ + freg_t fprs[__NUM_FPRS]; + /* Vector register save area */ + __vector128 vxrs[__NUM_VXRS]; + }; +}; + +/* VX array structure for address operand constraints in inline assemblies */ +struct vx_array { __vector128 _[__NUM_VXRS]; }; + +/* In-kernel FPU state structure */ +struct kernel_fpu { + u32 mask; + u32 fpc; + union { + freg_t fprs[__NUM_FPRS]; + __vector128 vxrs[__NUM_VXRS]; + }; +}; + +#endif /* _ASM_S390_FPU_TYPES_H */ diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h new file mode 100644 index 000000000..5a82b08f0 --- /dev/null +++ b/arch/s390/include/asm/ftrace.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_FTRACE_H +#define _ASM_S390_FTRACE_H + +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#define MCOUNT_INSN_SIZE 6 + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_CC_IS_CLANG +/* https://bugs.llvm.org/show_bug.cgi?id=41424 */ +#define ftrace_return_address(n) 0UL +#else +#define ftrace_return_address(n) __builtin_return_address(n) +#endif + +void ftrace_caller(void); + +extern void *ftrace_func; + +struct dyn_arch_ftrace { }; + +#define MCOUNT_ADDR 0 +#define FTRACE_ADDR ((unsigned long)ftrace_caller) + +#define KPROBE_ON_FTRACE_NOP 0 +#define KPROBE_ON_FTRACE_CALL 1 + +struct module; +struct dyn_ftrace; + +bool ftrace_need_init_nop(void); +#define ftrace_need_init_nop ftrace_need_init_nop + +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct ftrace_regs { + struct pt_regs regs; +}; + +static __always_inline struct 
pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) +{ + struct pt_regs *regs = &fregs->regs; + + if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS)) + return regs; + return NULL; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +struct fgraph_ret_regs { + unsigned long gpr2; + unsigned long fp; +}; + +static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs) +{ + return ret_regs->gpr2; +} + +static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs) +{ + return ret_regs->fp; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +static __always_inline unsigned long +ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs) +{ + return fregs->regs.psw.addr; +} + +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long ip) +{ + fregs->regs.psw.addr = ip; +} + +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&(fregs)->regs) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(&(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +/* + * When an ftrace registered caller is tracing a function that is + * also set by a register_ftrace_direct() call, it needs to be + * differentiated in the ftrace_caller trampoline. To do this, + * place the direct caller in the ORIG_GPR2 part of pt_regs. This + * tells the ftrace_caller that there's a direct caller. + */ +static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) +{ + struct pt_regs *regs = &fregs->regs; + regs->orig_gpr2 = addr; +} +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ + +/* + * Even though the system call numbers are identical for s390/s390x a + * different system call table is used for compat tasks. This may lead + * to e.g. incorrect or missing trace event sysfs files. + * Therefore simply do not trace compat system calls at all. + * See kernel/trace/trace_syscalls.c. + */ +#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS +static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return is_compat_task(); +} + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + /* + * Skip __s390_ and __s390x_ prefix - due to compat wrappers + * and aliasing some symbols of 64 bit system call functions + * may get the __s390_ prefix instead of the __s390x_ prefix. 
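+	 * For example, both "__s390_sys_read" (sym + 7) and
+	 * "__s390x_sys_read" (sym + 8) compare equal to "sys_read".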
+ */ + return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); +} + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_FUNCTION_TRACER + +#define FTRACE_NOP_INSN .word 0xc004, 0x0000, 0x0000 /* brcl 0,0 */ + +#ifndef CC_USING_HOTPATCH + +#define FTRACE_GEN_MCOUNT_RECORD(name) \ + .section __mcount_loc, "a", @progbits; \ + .quad name; \ + .previous; + +#else /* !CC_USING_HOTPATCH */ + +#define FTRACE_GEN_MCOUNT_RECORD(name) + +#endif /* !CC_USING_HOTPATCH */ + +#define FTRACE_GEN_NOP_ASM(name) \ + FTRACE_GEN_MCOUNT_RECORD(name) \ + FTRACE_NOP_INSN + +#else /* CONFIG_FUNCTION_TRACER */ + +#define FTRACE_GEN_NOP_ASM(name) + +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/ftrace.lds.h b/arch/s390/include/asm/ftrace.lds.h new file mode 100644 index 000000000..968adfd41 --- /dev/null +++ b/arch/s390/include/asm/ftrace.lds.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#endif + +#define SIZEOF_MCOUNT_LOC_ENTRY 8 +#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24 +#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n) \ + DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n), \ + SIZEOF_MCOUNT_LOC_ENTRY) + +#ifdef CONFIG_FUNCTION_TRACER +#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT \ + . = ALIGN(8); \ + __ftrace_hotpatch_trampolines_start = .; \ + . = . + FTRACE_HOTPATCH_TRAMPOLINES_SIZE(__stop_mcount_loc - \ + __start_mcount_loc); \ + __ftrace_hotpatch_trampolines_end = .; +#else +#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT +#endif diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h new file mode 100644 index 000000000..eaeaeb3ff --- /dev/null +++ b/arch/s390/include/asm/futex.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_FUTEX_H +#define _ASM_S390_FUTEX_H + +#include +#include +#include +#include +#include + +#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ + asm volatile( \ + " sacf 256\n" \ + "0: l %1,0(%6)\n" \ + "1:"insn \ + "2: cs %1,%2,0(%6)\n" \ + "3: jl 1b\n" \ + " lhi %0,0\n" \ + "4: sacf 768\n" \ + EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ + EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ + "=m" (*uaddr) \ + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ + "m" (*uaddr) : "cc"); + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval = 0, newval, ret; + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("lr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("lr %2,%1\nar %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("lr %2,%1\nor %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("lr %2,%1\nnr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("lr %2,%1\nxr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret; + + asm volatile( + " sacf 256\n" + "0: cs %1,%4,0(%5)\n" + "1: la %0,0\n" + "2: sacf 768\n" + EX_TABLE(0b,2b) EX_TABLE(1b,2b) + : "=d" (ret), "+d" (oldval), "=m" (*uaddr) + : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) + : "cc", "memory"); + *uval = oldval; + return ret; +} + +#endif /* 
_ASM_S390_FUTEX_H */ diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h new file mode 100644 index 000000000..5cc46e0dd --- /dev/null +++ b/arch/s390/include/asm/gmap.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KVM guest address space mapping code + * + * Copyright IBM Corp. 2007, 2016 + * Author(s): Martin Schwidefsky + */ + +#ifndef _ASM_S390_GMAP_H +#define _ASM_S390_GMAP_H + +#include +#include + +/* Generic bits for GMAP notification on DAT table entry changes. */ +#define GMAP_NOTIFY_SHADOW 0x2 +#define GMAP_NOTIFY_MPROT 0x1 + +/* Status bits only for huge segment entries */ +#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */ +#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */ + +/** + * struct gmap_struct - guest address space + * @list: list head for the mm->context gmap list + * @crst_list: list of all crst tables used in the guest address space + * @mm: pointer to the parent mm_struct + * @guest_to_host: radix tree with guest to host address translation + * @host_to_guest: radix tree with pointer to segment table entries + * @guest_table_lock: spinlock to protect all entries in the guest page table + * @ref_count: reference counter for the gmap structure + * @table: pointer to the page directory + * @asce: address space control element for gmap page table + * @pfault_enabled: defines if pfaults are applicable for the guest + * @guest_handle: protected virtual machine handle for the ultravisor + * @host_to_rmap: radix tree with gmap_rmap lists + * @children: list of shadow gmap structures + * @pt_list: list of all page tables used in the shadow guest address space + * @shadow_lock: spinlock to protect the shadow gmap list + * @parent: pointer to the parent gmap for shadow guest address spaces + * @orig_asce: ASCE for which the shadow page table has been created + * @edat_level: edat level to be used for the shadow translation + * @removed: flag to indicate if a shadow guest address space has been removed + * @initialized: flag to indicate if a shadow guest address space can be used + */ +struct gmap { + struct list_head list; + struct list_head crst_list; + struct mm_struct *mm; + struct radix_tree_root guest_to_host; + struct radix_tree_root host_to_guest; + spinlock_t guest_table_lock; + refcount_t ref_count; + unsigned long *table; + unsigned long asce; + unsigned long asce_end; + void *private; + bool pfault_enabled; + /* only set for protected virtual machines */ + unsigned long guest_handle; + /* Additional data for shadow guest address spaces */ + struct radix_tree_root host_to_rmap; + struct list_head children; + struct list_head pt_list; + spinlock_t shadow_lock; + struct gmap *parent; + unsigned long orig_asce; + int edat_level; + bool removed; + bool initialized; +}; + +/** + * struct gmap_rmap - reverse mapping for shadow page table entries + * @next: pointer to next rmap in the list + * @raddr: virtual rmap address in the shadow guest address space + */ +struct gmap_rmap { + struct gmap_rmap *next; + unsigned long raddr; +}; + +#define gmap_for_each_rmap(pos, head) \ + for (pos = (head); pos; pos = pos->next) + +#define gmap_for_each_rmap_safe(pos, n, head) \ + for (pos = (head); n = pos ? 
pos->next : NULL, pos; pos = n)
+
+/**
+ * struct gmap_notifier - notify function block for page invalidation
+ * @notifier_call: address of callback function
+ */
+struct gmap_notifier {
+	struct list_head list;
+	struct rcu_head rcu;
+	void (*notifier_call)(struct gmap *gmap, unsigned long start,
+			      unsigned long end);
+};
+
+static inline int gmap_is_shadow(struct gmap *gmap)
+{
+	return !!gmap->parent;
+}
+
+struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
+void gmap_remove(struct gmap *gmap);
+struct gmap *gmap_get(struct gmap *gmap);
+void gmap_put(struct gmap *gmap);
+
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+struct gmap *gmap_get_enabled(void);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long len);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
+int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
+void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
+
+int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
+
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+			 int edat_level);
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+		    int fake);
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+		    int fake);
+int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+		    int fake);
+int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
+		    int fake);
+int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
+			   unsigned long *pgt, int *dat_protection, int *fake);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
+
+void gmap_register_pte_notifier(struct gmap_notifier *);
+void gmap_unregister_pte_notifier(struct gmap_notifier *);
+
+int gmap_mprotect_notify(struct gmap *, unsigned long start,
+			 unsigned long len, int prot);
+
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+			     unsigned long gaddr, unsigned long vmaddr);
+int gmap_mark_unmergeable(void);
+void s390_unlist_old_asce(struct gmap *gmap);
+int s390_replace_asce(struct gmap *gmap);
+void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
+int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+			    unsigned long end, bool interruptible);
+
+/**
+ * s390_uv_destroy_range - Destroy a range of pages in the given mm.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function calls cond_resched(), so it should not generate stalls, but
+ * it otherwise only returns when it has completed.
+ */
+static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+					 unsigned long end)
+{
+	(void)__s390_uv_destroy_range(mm, start, end, false);
+}
+
+/**
+ * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
+ * given mm, but stop when a fatal signal is received.
+ * @mm: the mm to operate on
+ * @start: the start of the range
+ * @end: the end of the range
+ *
+ * This function calls cond_resched(), so it should not generate stalls. If
+ * a fatal signal is received, it returns -EINTR immediately, without
+ * finishing destroying the whole range. Upon successful completion,
+ * 0 is returned.
+ */
+static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
+						      unsigned long end)
+{
+	return __s390_uv_destroy_range(mm, start, end, true);
+}
+#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
new file mode 100644
index 000000000..58668ffb5
--- /dev/null
+++ b/arch/s390/include/asm/hardirq.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * Derived from "include/asm-i386/hardirq.h"
+ */
+
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include
+
+#define local_softirq_pending() (S390_lowcore.softirq_pending)
+#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x))
+#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
+
+#define __ARCH_IRQ_STAT
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
new file mode 100644
index 000000000..deb198a61
--- /dev/null
+++ b/arch/s390/include/asm/hugetlb.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Gerald Schaefer
+ */
+
+#ifndef _ASM_S390_HUGETLB_H
+#define _ASM_S390_HUGETLB_H
+
+#include
+#include
+
+#define hugetlb_free_pgd_range free_pgd_range
+#define hugepages_supported() (MACHINE_HAS_EDAT1)
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		       pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
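+ * For example, with a 1 MB hugepage size (the s390 EDAT1 segment size),
+ * both addr and len must be 0x100000-aligned or the checks below
+ * return -EINVAL.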
+ */ +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; +} + +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_arch_1, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) + set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY)); + else + set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY)); +} + +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + return huge_ptep_get_and_clear(vma->vm_mm, address, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int changed = !pte_same(huge_ptep_get(ptep), pte); + if (changed) { + huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + } + return changed; +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); + __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); +} + +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + return mk_pte(page, pgprot); +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline int huge_pte_none_mostly(pte_t pte) +{ + return huge_pte_none(pte); +} + +static inline int huge_pte_write(pte_t pte) +{ + return pte_write(pte); +} + +static inline int huge_pte_dirty(pte_t pte) +{ + return pte_dirty(pte); +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + return pte_mkwrite_novma(pte); +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + return pte_mkdirty(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + return pte_modify(pte, newprot); +} + +static inline pte_t huge_pte_mkuffd_wp(pte_t pte) +{ + return pte; +} + +static inline pte_t huge_pte_clear_uffd_wp(pte_t pte) +{ + return pte; +} + +static inline int huge_pte_uffd_wp(pte_t pte) +{ + return 0; +} + +static inline bool gigantic_page_runtime_supported(void) +{ + return true; +} + +#endif /* _ASM_S390_HUGETLB_H */ diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h new file mode 100644 index 000000000..9078b5b6b --- /dev/null +++ b/arch/s390/include/asm/hw_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _HW_IRQ_H +#define _HW_IRQ_H + +#include +#include + +void __init init_airq_interrupts(void); +void __init init_cio_interrupts(void); + +#endif diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h new file mode 100644 index 000000000..59fcc3c72 --- /dev/null +++ b/arch/s390/include/asm/idals.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author(s)......: Holger Smolinski + * Martin Schwidefsky + * Bugreports.to..: + * Copyright IBM Corp. 2000 + * + * History of changes + * 07/24/00 new file + * 05/04/02 code restructuring. 
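+ * + * An IDAL (indirect data address list) lets a channel program address data + * that is not contiguous or that lies above the 31-bit limit. Minimal usage + * sketch (hypothetical driver code, error handling elided): + * struct idal_buffer *ib = idal_buffer_alloc(count, 0); + * if (!IS_ERR(ib)) { + * idal_buffer_set_cda(ib, ccw); + * ... start the channel program, then ... + * idal_buffer_free(ib); + * }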
+ */ + +#ifndef _S390_IDALS_H +#define _S390_IDALS_H + +#include +#include +#include +#include +#include +#include + +#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ +#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) + +#define IDA_2K_SIZE_LOG 11 +#define IDA_2K_BLOCK_SIZE (1L << IDA_2K_SIZE_LOG) + +/* + * Test if an address/length pair needs an idal list. + */ +static inline int +idal_is_needed(void *vaddr, unsigned int length) +{ + return ((__pa(vaddr) + length - 1) >> 31) != 0; +} + + +/* + * Return the number of idal words needed for an address/length pair. + */ +static inline unsigned int idal_nr_words(void *vaddr, unsigned int length) +{ + return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + + (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; +} + +/* + * Return the number of 2K IDA words needed for an address/length pair. + */ +static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length) +{ + return ((__pa(vaddr) & (IDA_2K_BLOCK_SIZE - 1)) + length + + (IDA_2K_BLOCK_SIZE - 1)) >> IDA_2K_SIZE_LOG; +} + +/* + * Create the list of idal words for an address/length pair. + */ +static inline unsigned long *idal_create_words(unsigned long *idaws, + void *vaddr, unsigned int length) +{ + unsigned long paddr; + unsigned int cidaw; + + paddr = __pa(vaddr); + cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length + + (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; + *idaws++ = paddr; + paddr &= -IDA_BLOCK_SIZE; + while (--cidaw > 0) { + paddr += IDA_BLOCK_SIZE; + *idaws++ = paddr; + } + return idaws; +} + +/* + * Sets the address of the data in CCW. + * If necessary it allocates an IDAL and sets the appropriate flags. + */ +static inline int +set_normalized_cda(struct ccw1 * ccw, void *vaddr) +{ + unsigned int nridaws; + unsigned long *idal; + + if (ccw->flags & CCW_FLAG_IDA) + return -EINVAL; + nridaws = idal_nr_words(vaddr, ccw->count); + if (nridaws > 0) { + idal = kmalloc(nridaws * sizeof(unsigned long), + GFP_ATOMIC | GFP_DMA ); + if (idal == NULL) + return -ENOMEM; + idal_create_words(idal, vaddr, ccw->count); + ccw->flags |= CCW_FLAG_IDA; + vaddr = idal; + } + ccw->cda = (__u32)(unsigned long) vaddr; + return 0; +} + +/* + * Releases any allocated IDAL related to the CCW. + */ +static inline void +clear_normalized_cda(struct ccw1 * ccw) +{ + if (ccw->flags & CCW_FLAG_IDA) { + kfree((void *)(unsigned long) ccw->cda); + ccw->flags &= ~CCW_FLAG_IDA; + } + ccw->cda = 0; +} + +/* + * Idal buffer extension + */ +struct idal_buffer { + size_t size; + size_t page_order; + void *data[]; +}; + +/* + * Allocate an idal buffer + */ +static inline struct idal_buffer * +idal_buffer_alloc(size_t size, int page_order) +{ + struct idal_buffer *ib; + int nr_chunks, nr_ptrs, i; + + nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG; + nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG; + ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL); + if (ib == NULL) + return ERR_PTR(-ENOMEM); + ib->size = size; + ib->page_order = page_order; + for (i = 0; i < nr_ptrs; i++) { + if ((i & (nr_chunks - 1)) != 0) { + ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE; + continue; + } + ib->data[i] = (void *) + __get_free_pages(GFP_KERNEL, page_order); + if (ib->data[i] != NULL) + continue; + // Not enough memory + while (i >= nr_chunks) { + i -= nr_chunks; + free_pages((unsigned long) ib->data[i], + ib->page_order); + } + kfree(ib); + return ERR_PTR(-ENOMEM); + } + return ib; +} + +/* + * Free an idal buffer. + */ +static inline void +idal_buffer_free(struct idal_buffer *ib) +{ + int nr_chunks, nr_ptrs, i; + + nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG; + nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG; + for (i = 0; i < nr_ptrs; i += nr_chunks) + free_pages((unsigned long) ib->data[i], ib->page_order); + kfree(ib); +} + +/* + * Test if an idal list is really needed.
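+ * An idal list is needed if the buffer is spread over more than one chunk + * or reaches above the 31-bit address limit.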
+ */ +static inline int +__idal_buffer_is_needed(struct idal_buffer *ib) +{ + return ib->size > (4096ul << ib->page_order) || + idal_is_needed(ib->data[0], ib->size); +} + +/* + * Set channel data address to idal buffer. + */ +static inline void +idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw) +{ + if (__idal_buffer_is_needed(ib)) { + // setup idals; + ccw->cda = (u32)(addr_t) ib->data; + ccw->flags |= CCW_FLAG_IDA; + } else + // we do not need idals - use direct addressing + ccw->cda = (u32)(addr_t) ib->data[0]; + ccw->count = ib->size; +} + +/* + * Copy count bytes from an idal buffer to user memory + */ +static inline size_t +idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count) +{ + size_t left; + int i; + + BUG_ON(count > ib->size); + for (i = 0; count > IDA_BLOCK_SIZE; i++) { + left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE); + if (left) + return left + count - IDA_BLOCK_SIZE; + to = (void __user *) to + IDA_BLOCK_SIZE; + count -= IDA_BLOCK_SIZE; + } + return copy_to_user(to, ib->data[i], count); +} + +/* + * Copy count bytes from user memory to an idal buffer + */ +static inline size_t +idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count) +{ + size_t left; + int i; + + BUG_ON(count > ib->size); + for (i = 0; count > IDA_BLOCK_SIZE; i++) { + left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE); + if (left) + return left + count - IDA_BLOCK_SIZE; + from = (void __user *) from + IDA_BLOCK_SIZE; + count -= IDA_BLOCK_SIZE; + } + return copy_from_user(ib->data[i], from, count); +} + +#endif diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h new file mode 100644 index 000000000..09f763b9e --- /dev/null +++ b/arch/s390/include/asm/idle.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2014 + * + * Author: Martin Schwidefsky + */ + +#ifndef _S390_IDLE_H +#define _S390_IDLE_H + +#include +#include + +struct s390_idle_data { + unsigned long idle_count; + unsigned long idle_time; + unsigned long clock_idle_enter; + unsigned long timer_idle_enter; + unsigned long mt_cycles_enter[8]; +}; + +extern struct device_attribute dev_attr_idle_count; +extern struct device_attribute dev_attr_idle_time_us; + +void psw_idle(struct s390_idle_data *data, unsigned long psw_mask); + +#endif /* _S390_IDLE_H */ diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h new file mode 100644 index 000000000..4453ad7c1 --- /dev/null +++ b/arch/s390/include/asm/io.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/io.h" + */ + +#ifndef _S390_IO_H +#define _S390_IO_H + +#include +#include +#include +#include + +#define xlate_dev_mem_ptr xlate_dev_mem_ptr +void *xlate_dev_mem_ptr(phys_addr_t phys); +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); + +#define IO_SPACE_LIMIT 0 + +/* + * I/O memory mapping functions. 
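+ * ioremap_wc() and ioremap_wt() below simply wrap ioremap_prot() with the + * write-combining and write-through page protections.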
+ */ +#define ioremap_prot ioremap_prot +#define iounmap iounmap + +#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) + +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) +#define ioremap_wt(addr, size) \ + ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) + +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return NULL; +} + +static inline void ioport_unmap(void __iomem *p) +{ +} + +#ifdef CONFIG_PCI + +/* + * s390 needs a private implementation of pci_iomap since ioremap with its + * offset parameter isn't sufficient. That's because BAR spaces are not + * disjunctive on s390 so we need the bar parameter of pci_iomap to find + * the corresponding device and create the mapping cookie. + */ +#define pci_iomap pci_iomap +#define pci_iomap_range pci_iomap_range +#define pci_iounmap pci_iounmap +#define pci_iomap_wc pci_iomap_wc +#define pci_iomap_wc_range pci_iomap_wc_range + +#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count) +#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count) +#define memset_io(dst, val, count) zpci_memset_io(dst, val, count) + +#define mmiowb() zpci_barrier() + +#define __raw_readb zpci_read_u8 +#define __raw_readw zpci_read_u16 +#define __raw_readl zpci_read_u32 +#define __raw_readq zpci_read_u64 +#define __raw_writeb zpci_write_u8 +#define __raw_writew zpci_write_u16 +#define __raw_writel zpci_write_u32 +#define __raw_writeq zpci_write_u64 + +#endif /* CONFIG_PCI */ + +#include + +#endif diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h new file mode 100644 index 000000000..b0d000324 --- /dev/null +++ b/arch/s390/include/asm/ipl.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * s390 (re)ipl support + * + * Copyright IBM Corp. 
2007 + */ + +#ifndef _ASM_S390_IPL_H +#define _ASM_S390_IPL_H + +#include +#include +#include +#include +#include +#include + +struct ipl_parameter_block { + struct ipl_pl_hdr hdr; + union { + struct ipl_pb_hdr pb0_hdr; + struct ipl_pb0_common common; + struct ipl_pb0_fcp fcp; + struct ipl_pb0_ccw ccw; + struct ipl_pb0_eckd eckd; + struct ipl_pb0_nvme nvme; + char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)]; + }; +} __packed __aligned(PAGE_SIZE); + +#define NSS_NAME_SIZE 8 + +#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \ + sizeof(struct ipl_pb0_fcp)) +#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp)) + +#define IPL_BP_NVME_LEN (sizeof(struct ipl_pl_hdr) + \ + sizeof(struct ipl_pb0_nvme)) +#define IPL_BP0_NVME_LEN (sizeof(struct ipl_pb0_nvme)) + +#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \ + sizeof(struct ipl_pb0_ccw)) +#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw)) + +#define IPL_BP_ECKD_LEN (sizeof(struct ipl_pl_hdr) + \ + sizeof(struct ipl_pb0_eckd)) +#define IPL_BP0_ECKD_LEN (sizeof(struct ipl_pb0_eckd)) + +#define IPL_MAX_SUPPORTED_VERSION (0) + +#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1) + +#define DIAG308_VMPARM_SIZE (64) +#define DIAG308_SCPDATA_OFFSET offsetof(struct ipl_parameter_block, \ + fcp.scp_data) +#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - DIAG308_SCPDATA_OFFSET) + +struct save_area; +struct save_area * __init save_area_alloc(bool is_boot_cpu); +struct save_area * __init save_area_boot_cpu(void); +void __init save_area_add_regs(struct save_area *, void *regs); +void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs); + +extern void s390_reset_system(void); +extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size, + const struct ipl_parameter_block *ipb); + +enum ipl_type { + IPL_TYPE_UNKNOWN = 1, + IPL_TYPE_CCW = 2, + IPL_TYPE_FCP = 4, + IPL_TYPE_FCP_DUMP = 8, + IPL_TYPE_NSS = 16, + IPL_TYPE_NVME = 32, + IPL_TYPE_NVME_DUMP = 64, + IPL_TYPE_ECKD = 128, + IPL_TYPE_ECKD_DUMP = 256, +}; + +struct ipl_info +{ + enum ipl_type type; + union { + struct { + struct ccw_dev_id dev_id; + } ccw; + struct { + struct ccw_dev_id dev_id; + } eckd; + struct { + struct ccw_dev_id dev_id; + u64 wwpn; + u64 lun; + } fcp; + struct { + u32 fid; + u32 nsid; + } nvme; + struct { + char name[NSS_NAME_SIZE + 1]; + } nss; + } data; +}; + +extern struct ipl_info ipl_info; +extern void setup_ipl(void); +extern void set_os_info_reipl_block(void); + +static inline bool is_ipl_type_dump(void) +{ + return (ipl_info.type == IPL_TYPE_FCP_DUMP) || + (ipl_info.type == IPL_TYPE_ECKD_DUMP) || + (ipl_info.type == IPL_TYPE_NVME_DUMP); +} + +struct ipl_report { + struct ipl_parameter_block *ipib; + struct list_head components; + struct list_head certificates; + size_t size; +}; + +struct ipl_report_component { + struct list_head list; + struct ipl_rb_component_entry entry; +}; + +struct ipl_report_certificate { + struct list_head list; + struct ipl_rb_certificate_entry entry; + void *key; +}; + +struct kexec_buf; +struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib); +void *ipl_report_finish(struct ipl_report *report); +int ipl_report_free(struct ipl_report *report); +int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf, + unsigned char flags, unsigned short cert); +int ipl_report_add_certificate(struct ipl_report *report, void *key, + unsigned long addr, unsigned long len); + +/* + * DIAG 308 support + */ +enum diag308_subcode { + DIAG308_CLEAR_RESET = 0, + DIAG308_LOAD_NORMAL_RESET = 1, + DIAG308_REL_HSA = 2, + 
DIAG308_LOAD_CLEAR = 3, + DIAG308_LOAD_NORMAL_DUMP = 4, + DIAG308_SET = 5, + DIAG308_STORE = 6, + DIAG308_LOAD_NORMAL = 7, +}; + +enum diag308_subcode_flags { + DIAG308_FLAG_EI = 1UL << 16, +}; + +enum diag308_rc { + DIAG308_RC_OK = 0x0001, + DIAG308_RC_NOCONFIG = 0x0102, +}; + +extern int diag308(unsigned long subcode, void *addr); +extern void store_status(void (*fn)(void *), void *data); +extern void lgr_info_log(void); + +#endif /* _ASM_S390_IPL_H */ diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h new file mode 100644 index 000000000..89902f754 --- /dev/null +++ b/arch/s390/include/asm/irq.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_IRQ_H +#define _ASM_IRQ_H + +#define EXT_INTERRUPT 0 +#define IO_INTERRUPT 1 +#define THIN_INTERRUPT 2 + +#define NR_IRQS_BASE 3 + +#define NR_IRQS NR_IRQS_BASE +#define NR_IRQS_LEGACY NR_IRQS_BASE + +/* External interruption codes */ +#define EXT_IRQ_INTERRUPT_KEY 0x0040 +#define EXT_IRQ_CLK_COMP 0x1004 +#define EXT_IRQ_CPU_TIMER 0x1005 +#define EXT_IRQ_WARNING_TRACK 0x1007 +#define EXT_IRQ_MALFUNC_ALERT 0x1200 +#define EXT_IRQ_EMERGENCY_SIG 0x1201 +#define EXT_IRQ_EXTERNAL_CALL 0x1202 +#define EXT_IRQ_TIMING_ALERT 0x1406 +#define EXT_IRQ_MEASURE_ALERT 0x1407 +#define EXT_IRQ_SERVICE_SIG 0x2401 +#define EXT_IRQ_CP_SERVICE 0x2603 +#define EXT_IRQ_IUCV 0x4000 + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +enum interruption_class { + IRQEXT_CLK, + IRQEXT_EXC, + IRQEXT_EMS, + IRQEXT_TMR, + IRQEXT_TLA, + IRQEXT_PFL, + IRQEXT_DSD, + IRQEXT_VRT, + IRQEXT_SCP, + IRQEXT_IUC, + IRQEXT_CMS, + IRQEXT_CMC, + IRQEXT_FTP, + IRQIO_CIO, + IRQIO_DAS, + IRQIO_C15, + IRQIO_C70, + IRQIO_TAP, + IRQIO_VMR, + IRQIO_LCS, + IRQIO_CTC, + IRQIO_ADM, + IRQIO_CSC, + IRQIO_VIR, + IRQIO_QAI, + IRQIO_APB, + IRQIO_PCF, + IRQIO_PCD, + IRQIO_MSI, + IRQIO_VAI, + IRQIO_GAL, + NMI_NMI, + CPU_RST, + NR_ARCH_IRQS +}; + +struct irq_stat { + unsigned int irqs[NR_ARCH_IRQS]; +}; + +DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); + +static __always_inline void inc_irq_stat(enum interruption_class irq) +{ + __this_cpu_inc(irq_stat.irqs[irq]); +} + +struct ext_code { + union { + struct { + unsigned short subcode; + unsigned short code; + }; + unsigned int int_code; + }; +}; + +typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long); + +int register_external_irq(u16 code, ext_int_handler_t handler); +int unregister_external_irq(u16 code, ext_int_handler_t handler); + +enum irq_subclass { + IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, + IRQ_SUBCLASS_SERVICE_SIGNAL = 9, +}; + +#define CR0_IRQ_SUBCLASS_MASK \ + ((1UL << (63 - 30)) /* Warning Track */ | \ + (1UL << (63 - 48)) /* Malfunction Alert */ | \ + (1UL << (63 - 49)) /* Emergency Signal */ | \ + (1UL << (63 - 50)) /* External Call */ | \ + (1UL << (63 - 52)) /* Clock Comparator */ | \ + (1UL << (63 - 53)) /* CPU Timer */ | \ + (1UL << (63 - 54)) /* Service Signal */ | \ + (1UL << (63 - 57)) /* Interrupt Key */ | \ + (1UL << (63 - 58)) /* Measurement Alert */ | \ + (1UL << (63 - 59)) /* Timing Alert */ | \ + (1UL << (63 - 62))) /* IUCV */ + +void irq_subclass_register(enum irq_subclass subclass); +void irq_subclass_unregister(enum irq_subclass subclass); + +#define irq_canonicalize(irq) (irq) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IRQ_H */ diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h new file mode 100644 index 000000000..603783766 --- /dev/null +++ b/arch/s390/include/asm/irq_work.h @@ -0,0 +1,12 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_IRQ_WORK_H +#define _ASM_S390_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return true; +} + +void arch_irq_work_raise(void); + +#endif /* _ASM_S390_IRQ_WORK_H */ diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h new file mode 100644 index 000000000..02427b205 --- /dev/null +++ b/arch/s390/include/asm/irqflags.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2006, 2010 + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_IRQFLAGS_H +#define __ASM_IRQFLAGS_H + +#include + +#define ARCH_IRQ_ENABLED (3UL << (BITS_PER_LONG - 8)) + +/* store then OR system mask. */ +#define __arch_local_irq_stosm(__or) \ +({ \ + unsigned long __mask; \ + asm volatile( \ + " stosm %0,%1" \ + : "=Q" (__mask) : "i" (__or) : "memory"); \ + __mask; \ +}) + +/* store then AND system mask. */ +#define __arch_local_irq_stnsm(__and) \ +({ \ + unsigned long __mask; \ + asm volatile( \ + " stnsm %0,%1" \ + : "=Q" (__mask) : "i" (__and) : "memory"); \ + __mask; \ +}) + +/* set system mask. */ +static __always_inline void __arch_local_irq_ssm(unsigned long flags) +{ + asm volatile("ssm %0" : : "Q" (flags) : "memory"); +} + +static __always_inline unsigned long arch_local_save_flags(void) +{ + return __arch_local_irq_stnsm(0xff); +} + +static __always_inline unsigned long arch_local_irq_save(void) +{ + return __arch_local_irq_stnsm(0xfc); +} + +static __always_inline void arch_local_irq_disable(void) +{ + arch_local_irq_save(); +} + +static __always_inline void arch_local_irq_enable(void) +{ + __arch_local_irq_stosm(0x03); +} + +/* This only restores external and I/O interrupt state */ +static __always_inline void arch_local_irq_restore(unsigned long flags) +{ + /* only disabled->disabled and disabled->enabled is valid */ + if (flags & ARCH_IRQ_ENABLED) + arch_local_irq_enable(); +} + +static __always_inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & ARCH_IRQ_ENABLED); +} + +static __always_inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __ASM_IRQFLAGS_H */ diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h new file mode 100644 index 000000000..b2cc1ec78 --- /dev/null +++ b/arch/s390/include/asm/isc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ISC_H +#define _ASM_S390_ISC_H + +#include + +/* + * I/O interruption subclasses used by drivers. + * Please add all used iscs here so that it is possible to distribute + * isc usage between drivers. + * Reminder: 0 is highest priority, 7 lowest. + */ +#define MAX_ISC 7 + +/* Regular I/O interrupts. */ +#define IO_SCH_ISC 3 /* regular I/O subchannels */ +#define CONSOLE_ISC 1 /* console I/O subchannel */ +#define EADM_SCH_ISC 4 /* EADM subchannels */ +#define CHSC_SCH_ISC 7 /* CHSC subchannels */ +#define VFIO_CCW_ISC IO_SCH_ISC /* VFIO-CCW I/O subchannels */ +/* Adapter interrupts. 
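These are floating interrupts delivered via the adapter-interruption facility; one ISC may be shared by several drivers.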
*/ +#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ +#define PCI_ISC 2 /* PCI I/O subchannels */ +#define GAL_ISC 5 /* GIB alert */ +#define AP_ISC 6 /* adjunct processor (crypto) devices */ + +/* Functions for registration of I/O interruption subclasses */ +void isc_register(unsigned int isc); +void isc_unregister(unsigned int isc); + +#endif /* _ASM_S390_ISC_H */ diff --git a/arch/s390/include/asm/itcw.h b/arch/s390/include/asm/itcw.h new file mode 100644 index 000000000..59b739613 --- /dev/null +++ b/arch/s390/include/asm/itcw.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions for incremental construction of fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter + */ + +#ifndef _ASM_S390_ITCW_H +#define _ASM_S390_ITCW_H + +#include +#include + +#define ITCW_OP_READ 0 +#define ITCW_OP_WRITE 1 + +struct itcw; + +struct tcw *itcw_get_tcw(struct itcw *itcw); +size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws); +struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, + int max_tidaws, int intrg_max_tidaws); +struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, + u8 cd_count, u32 count); +struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, + u32 count); +void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal); +void itcw_finalize(struct itcw *itcw); + +#endif /* _ASM_S390_ITCW_H */ diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h new file mode 100644 index 000000000..895f774bb --- /dev/null +++ b/arch/s390/include/asm/jump_label.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_JUMP_LABEL_H +#define _ASM_S390_JUMP_LABEL_H + +#define HAVE_JUMP_LABEL_BATCH + +#ifndef __ASSEMBLY__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE 6 + +#ifdef CONFIG_CC_IS_CLANG +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "i" +#elif __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + +/* + * We use a brcl 0,<offset> instruction for jump labels so it + * can be easily distinguished from a hotpatch generated instruction.
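+ * The __jump_table entries emitted below record the code location, branch + * target and static key; generic code uses them to patch the brcl when the + * key is toggled. Typical use, for illustration: + * DEFINE_STATIC_KEY_FALSE(key); + * if (static_branch_unlikely(&key)) { ... }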
+ */ +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("0: brcl 0,%l[label]\n" + ".pushsection __jump_table,\"aw\"\n" + ".balign 8\n" + ".long 0b-.,%l[label]-.\n" + ".quad %0+%1-.\n" + ".popsection\n" + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); + return false; +label: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("0: brcl 15,%l[label]\n" + ".pushsection __jump_table,\"aw\"\n" + ".balign 8\n" + ".long 0b-.,%l[label]-.\n" + ".quad %0+%1-.\n" + ".popsection\n" + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); + return false; +label: + return true; +} + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h new file mode 100644 index 000000000..0cffead0f --- /dev/null +++ b/arch/s390/include/asm/kasan.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#include + +#ifdef CONFIG_KASAN + +#define KASAN_SHADOW_SCALE_SHIFT 3 +#define KASAN_SHADOW_SIZE \ + (_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT)) +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) + +#endif + +#endif diff --git a/arch/s390/include/asm/kdebug.h b/arch/s390/include/asm/kdebug.h new file mode 100644 index 000000000..4377238e4 --- /dev/null +++ b/arch/s390/include/asm/kdebug.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S390_KDEBUG_H +#define _S390_KDEBUG_H + +/* + * Feb 2006 Ported to s390 + */ + +struct pt_regs; + +enum die_val { + DIE_OOPS = 1, + DIE_BPT, + DIE_SSTEP, + DIE_PANIC, + DIE_NMI, + DIE_DIE, + DIE_NMIWATCHDOG, + DIE_KERNELDEBUG, + DIE_TRAP, + DIE_GPF, + DIE_CALL, + DIE_NMI_IPI, +}; + +extern void __noreturn die(struct pt_regs *, const char *); + +#endif diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h new file mode 100644 index 000000000..1bd08eb56 --- /dev/null +++ b/arch/s390/include/asm/kexec.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2005 + * + * Author(s): Rolf Adelsberger + * + */ + +#ifndef _S390_KEXEC_H +#define _S390_KEXEC_H + +#include + +#include +#include +#include +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control pages */ +/* Not more than 2GB */ +#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) + +/* Allocate control page with GFP_DMA */ +#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY) + +/* Maximum address we can use for the crash control pages */ +#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) + +/* Allocate one page for the pdp and the second for the code */ +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +/* Alignment of crashkernel memory */ +#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_S390 + +/* Allow kexec_file to load a segment to 0 */ +#define KEXEC_BUF_MEM_UNKNOWN -1 + +/* Provide a dummy definition to avoid build failures. 
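On s390, register state for dumps is collected via the save_area helpers declared in asm/ipl.h instead.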
*/ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) { } + +struct kimage; +struct s390_load_data { + /* Pointer to the kernel buffer. Used to register cmdline etc.. */ + void *kernel_buf; + + /* Load address of the kernel_buf. */ + unsigned long kernel_mem; + + /* Parmarea in the kernel buffer. */ + struct parmarea *parm; + + /* Total size of loaded segments in memory. Used as an offset. */ + size_t memsz; + + struct ipl_report *report; +}; + +int s390_verify_sig(const char *kernel, unsigned long kernel_len); +void *kexec_file_add_components(struct kimage *image, + int (*add_kernel)(struct kimage *image, + struct s390_load_data *data)); +int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, + unsigned long addr); + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + void *ipl_buf; +}; + +extern const struct kexec_file_ops s390_kexec_image_ops; +extern const struct kexec_file_ops s390_kexec_elf_ops; + +#ifdef CONFIG_CRASH_DUMP +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres +#endif + +#ifdef CONFIG_KEXEC_FILE +struct purgatory_info; +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup +#endif +#endif /*_S390_KEXEC_H */ diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h new file mode 100644 index 000000000..e47fd8cbe --- /dev/null +++ b/arch/s390/include/asm/kfence.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_KFENCE_H +#define _ASM_S390_KFENCE_H + +#include +#include +#include +#include + +void __kernel_map_pages(struct page *page, int numpages, int enable); + +static __always_inline bool arch_kfence_init_pool(void) +{ + return true; +} + +#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK) + +/* + * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(), + * but earlier where page table allocations still happen with memblock. + * Reason is that arch_kfence_init_pool() gets called when the system + * is still in a limbo state - disabling and enabling bottom halves is + * not yet allowed, but that is what our page_table_alloc() would do. + */ +static __always_inline void kfence_split_mapping(void) +{ +#ifdef CONFIG_KFENCE + unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT; + + set_memory_4k((unsigned long)__kfence_pool, pool_pages); +#endif +} + +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + __kernel_map_pages(virt_to_page((void *)addr), 1, !protect); + return true; +} + +#endif /* _ASM_S390_KFENCE_H */ diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h new file mode 100644 index 000000000..83f732ca3 --- /dev/null +++ b/arch/s390/include/asm/kprobes.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _ASM_S390_KPROBES_H +#define _ASM_S390_KPROBES_H +/* + * Kernel Probes (KProbes) + * + * Copyright IBM Corp. 
2002, 2006 + * + * 2002-Oct Created by Vamsi Krishna S Kernel + * Probes initial implementation ( includes suggestions from + * Rusty Russell). + * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli + * + * 2005-Dec Used as a template for s390 by Mike Grundy + * + */ +#include +#include + +#define BREAKPOINT_INSTRUCTION 0x0002 + +#define FIXUP_PSW_NORMAL 0x08 +#define FIXUP_BRANCH_NOT_TAKEN 0x04 +#define FIXUP_RETURN_REGISTER 0x02 +#define FIXUP_NOT_REQUIRED 0x01 + +int probe_is_prohibited_opcode(u16 *insn); +int probe_get_fixup_type(u16 *insn); +int probe_is_insn_relative_long(u16 *insn); + +#ifdef CONFIG_KPROBES +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +struct pt_regs; +struct kprobe; + +typedef u16 kprobe_opcode_t; + +/* Maximum instruction size is 3 (16bit) halfwords: */ +#define MAX_INSN_SIZE 0x0003 +#define MAX_STACK_SIZE 64 +#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ + (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) \ + ? (MAX_STACK_SIZE) \ + : (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) + +#define kretprobe_blacklist_size 0 + +/* Architecture specific copy of original instruction */ +struct arch_specific_insn { + /* copy of original instruction */ + kprobe_opcode_t *insn; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + unsigned long kprobe_saved_imask; + unsigned long kprobe_saved_ctl[3]; + struct prev_kprobe prev_kprobe; +}; + +void arch_remove_kprobe(struct kprobe *p); + +int kprobe_fault_handler(struct pt_regs *regs, int trapnr); +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + +#define flush_insn_slot(p) do { } while (0) + +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_S390_KPROBES_H */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h new file mode 100644 index 000000000..427f9528a --- /dev/null +++ b/arch/s390/include/asm/kvm_host.h @@ -0,0 +1,1060 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * definition for kernel virtual machines on s390 + * + * Copyright IBM Corp. 2008, 2018 + * + * Author(s): Carsten Otte + */ + + +#ifndef ASM_KVM_HOST_H +#define ASM_KVM_HOST_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KVM_S390_BSCA_CPU_SLOTS 64 +#define KVM_S390_ESCA_CPU_SLOTS 248 +#define KVM_MAX_VCPUS 255 + +/* + * These seem to be used for allocating ->chip in the routing table, which we + * don't use. 1 is as small as we can get to reduce the needed memory. If we + * need to look at ->chip later on, we'll need to revisit this. 
+ */ +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 1 +#define KVM_HALT_POLL_NS_DEFAULT 50000 + +/* s390-specific vcpu->requests bit members */ +#define KVM_REQ_ENABLE_IBS KVM_ARCH_REQ(0) +#define KVM_REQ_DISABLE_IBS KVM_ARCH_REQ(1) +#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2) +#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3) +#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4) +#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) +#define KVM_REQ_REFRESH_GUEST_PREFIX \ + KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) + +#define SIGP_CTRL_C 0x80 +#define SIGP_CTRL_SCN_MASK 0x3f + +union bsca_sigp_ctrl { + __u8 value; + struct { + __u8 c : 1; + __u8 r : 1; + __u8 scn : 6; + }; +}; + +union esca_sigp_ctrl { + __u16 value; + struct { + __u8 c : 1; + __u8 reserved: 7; + __u8 scn; + }; +}; + +struct esca_entry { + union esca_sigp_ctrl sigp_ctrl; + __u16 reserved1[3]; + __u64 sda; + __u64 reserved2[6]; +}; + +struct bsca_entry { + __u8 reserved0; + union bsca_sigp_ctrl sigp_ctrl; + __u16 reserved[3]; + __u64 sda; + __u64 reserved2[2]; +}; + +union ipte_control { + unsigned long val; + struct { + unsigned long k : 1; + unsigned long kh : 31; + unsigned long kg : 32; + }; +}; + +union sca_utility { + __u16 val; + struct { + __u16 mtcr : 1; + __u16 reserved : 15; + }; +}; + +struct bsca_block { + union ipte_control ipte_control; + __u64 reserved[5]; + __u64 mcn; + union sca_utility utility; + __u8 reserved2[6]; + struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; +}; + +struct esca_block { + union ipte_control ipte_control; + __u64 reserved1[6]; + union sca_utility utility; + __u8 reserved2[6]; + __u64 mcn[4]; + __u64 reserved3[20]; + struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; +}; + +/* + * This struct is used to store some machine check info from lowcore + * for machine checks that happen while the guest is running. + * This info in host's lowcore might be overwritten by a second machine + * check from host when host is in the machine check's high-level handling. + * The size is 24 bytes. 
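+ * (8-byte mcic, 8-byte failing storage address, 4-byte external damage + * code, 4-byte reserved field.)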
+ */ +struct mcck_volatile_info { + __u64 mcic; + __u64 failing_storage_address; + __u32 ext_damage_code; + __u32 reserved; +}; + +#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \ + CR0_MEASUREMENT_ALERT_SUBMASK) +#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \ + CR14_EXTERNAL_DAMAGE_SUBMASK) + +#define SIDAD_SIZE_MASK 0xff +#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) +#define sida_size(sie_block) \ + ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) + +#define CPUSTAT_STOPPED 0x80000000 +#define CPUSTAT_WAIT 0x10000000 +#define CPUSTAT_ECALL_PEND 0x08000000 +#define CPUSTAT_STOP_INT 0x04000000 +#define CPUSTAT_IO_INT 0x02000000 +#define CPUSTAT_EXT_INT 0x01000000 +#define CPUSTAT_RUNNING 0x00800000 +#define CPUSTAT_RETAINED 0x00400000 +#define CPUSTAT_TIMING_SUB 0x00020000 +#define CPUSTAT_SIE_SUB 0x00010000 +#define CPUSTAT_RRF 0x00008000 +#define CPUSTAT_SLSV 0x00004000 +#define CPUSTAT_SLSR 0x00002000 +#define CPUSTAT_ZARCH 0x00000800 +#define CPUSTAT_MCDS 0x00000100 +#define CPUSTAT_KSS 0x00000200 +#define CPUSTAT_SM 0x00000080 +#define CPUSTAT_IBS 0x00000040 +#define CPUSTAT_GED2 0x00000010 +#define CPUSTAT_G 0x00000008 +#define CPUSTAT_GED 0x00000004 +#define CPUSTAT_J 0x00000002 +#define CPUSTAT_P 0x00000001 + +struct kvm_s390_sie_block { + atomic_t cpuflags; /* 0x0000 */ + __u32 : 1; /* 0x0004 */ + __u32 prefix : 18; + __u32 : 1; + __u32 ibc : 12; + __u8 reserved08[4]; /* 0x0008 */ +#define PROG_IN_SIE (1<<0) + __u32 prog0c; /* 0x000c */ + union { + __u8 reserved10[16]; /* 0x0010 */ + struct { + __u64 pv_handle_cpu; + __u64 pv_handle_config; + }; + }; +#define PROG_BLOCK_SIE (1<<0) +#define PROG_REQUEST (1<<1) + atomic_t prog20; /* 0x0020 */ + __u8 reserved24[4]; /* 0x0024 */ + __u64 cputm; /* 0x0028 */ + __u64 ckc; /* 0x0030 */ + __u64 epoch; /* 0x0038 */ + __u32 svcc; /* 0x0040 */ +#define LCTL_CR0 0x8000 +#define LCTL_CR6 0x0200 +#define LCTL_CR9 0x0040 +#define LCTL_CR10 0x0020 +#define LCTL_CR11 0x0010 +#define LCTL_CR14 0x0002 + __u16 lctl; /* 0x0044 */ + __s16 icpua; /* 0x0046 */ +#define ICTL_OPEREXC 0x80000000 +#define ICTL_PINT 0x20000000 +#define ICTL_LPSW 0x00400000 +#define ICTL_STCTL 0x00040000 +#define ICTL_ISKE 0x00004000 +#define ICTL_SSKE 0x00002000 +#define ICTL_RRBE 0x00001000 +#define ICTL_TPROT 0x00000200 + __u32 ictl; /* 0x0048 */ +#define ECA_CEI 0x80000000 +#define ECA_IB 0x40000000 +#define ECA_SIGPI 0x10000000 +#define ECA_MVPGI 0x01000000 +#define ECA_AIV 0x00200000 +#define ECA_VX 0x00020000 +#define ECA_PROTEXCI 0x00002000 +#define ECA_APIE 0x00000008 +#define ECA_SII 0x00000001 + __u32 eca; /* 0x004c */ +#define ICPT_INST 0x04 +#define ICPT_PROGI 0x08 +#define ICPT_INSTPROGI 0x0C +#define ICPT_EXTREQ 0x10 +#define ICPT_EXTINT 0x14 +#define ICPT_IOREQ 0x18 +#define ICPT_WAIT 0x1c +#define ICPT_VALIDITY 0x20 +#define ICPT_STOP 0x28 +#define ICPT_OPEREXC 0x2C +#define ICPT_PARTEXEC 0x38 +#define ICPT_IOINST 0x40 +#define ICPT_KSS 0x5c +#define ICPT_MCHKREQ 0x60 +#define ICPT_INT_ENABLE 0x64 +#define ICPT_PV_INSTR 0x68 +#define ICPT_PV_NOTIFY 0x6c +#define ICPT_PV_PREF 0x70 + __u8 icptcode; /* 0x0050 */ + __u8 icptstatus; /* 0x0051 */ + __u16 ihcpu; /* 0x0052 */ + __u8 reserved54; /* 0x0054 */ +#define IICTL_CODE_NONE 0x00 +#define IICTL_CODE_MCHK 0x01 +#define IICTL_CODE_EXT 0x02 +#define IICTL_CODE_IO 0x03 +#define IICTL_CODE_RESTART 0x04 +#define IICTL_CODE_SPECIFICATION 0x10 +#define IICTL_CODE_OPERAND 0x11 + __u8 iictl; /* 0x0055 */ + __u16 ipa; /* 0x0056 */ + __u32 ipb; /* 
0x0058 */ + __u32 scaoh; /* 0x005c */ +#define FPF_BPBC 0x20 + __u8 fpf; /* 0x0060 */ +#define ECB_GS 0x40 +#define ECB_TE 0x10 +#define ECB_SPECI 0x08 +#define ECB_SRSI 0x04 +#define ECB_HOSTPROTINT 0x02 +#define ECB_PTF 0x01 + __u8 ecb; /* 0x0061 */ +#define ECB2_CMMA 0x80 +#define ECB2_IEP 0x20 +#define ECB2_PFMFI 0x08 +#define ECB2_ESCA 0x04 +#define ECB2_ZPCI_LSI 0x02 + __u8 ecb2; /* 0x0062 */ +#define ECB3_AISI 0x20 +#define ECB3_AISII 0x10 +#define ECB3_DEA 0x08 +#define ECB3_AES 0x04 +#define ECB3_RI 0x01 + __u8 ecb3; /* 0x0063 */ +#define ESCA_SCAOL_MASK ~0x3fU + __u32 scaol; /* 0x0064 */ + __u8 sdf; /* 0x0068 */ + __u8 epdx; /* 0x0069 */ + __u8 cpnc; /* 0x006a */ + __u8 reserved6b; /* 0x006b */ + __u32 todpr; /* 0x006c */ +#define GISA_FORMAT1 0x00000001 + __u32 gd; /* 0x0070 */ + __u8 reserved74[12]; /* 0x0074 */ + __u64 mso; /* 0x0080 */ + __u64 msl; /* 0x0088 */ + psw_t gpsw; /* 0x0090 */ + __u64 gg14; /* 0x00a0 */ + __u64 gg15; /* 0x00a8 */ + __u8 reservedb0[8]; /* 0x00b0 */ +#define HPID_KVM 0x4 +#define HPID_VSIE 0x5 + __u8 hpid; /* 0x00b8 */ + __u8 reservedb9[7]; /* 0x00b9 */ + union { + struct { + __u32 eiparams; /* 0x00c0 */ + __u16 extcpuaddr; /* 0x00c4 */ + __u16 eic; /* 0x00c6 */ + }; + __u64 mcic; /* 0x00c0 */ + } __packed; + __u32 reservedc8; /* 0x00c8 */ + union { + struct { + __u16 pgmilc; /* 0x00cc */ + __u16 iprcc; /* 0x00ce */ + }; + __u32 edc; /* 0x00cc */ + } __packed; + union { + struct { + __u32 dxc; /* 0x00d0 */ + __u16 mcn; /* 0x00d4 */ + __u8 perc; /* 0x00d6 */ + __u8 peratmid; /* 0x00d7 */ + }; + __u64 faddr; /* 0x00d0 */ + } __packed; + __u64 peraddr; /* 0x00d8 */ + __u8 eai; /* 0x00e0 */ + __u8 peraid; /* 0x00e1 */ + __u8 oai; /* 0x00e2 */ + __u8 armid; /* 0x00e3 */ + __u8 reservede4[4]; /* 0x00e4 */ + union { + __u64 tecmc; /* 0x00e8 */ + struct { + __u16 subchannel_id; /* 0x00e8 */ + __u16 subchannel_nr; /* 0x00ea */ + __u32 io_int_parm; /* 0x00ec */ + __u32 io_int_word; /* 0x00f0 */ + }; + } __packed; + __u8 reservedf4[8]; /* 0x00f4 */ +#define CRYCB_FORMAT_MASK 0x00000003 +#define CRYCB_FORMAT0 0x00000000 +#define CRYCB_FORMAT1 0x00000001 +#define CRYCB_FORMAT2 0x00000003 + __u32 crycbd; /* 0x00fc */ + __u64 gcr[16]; /* 0x0100 */ + union { + __u64 gbea; /* 0x0180 */ + __u64 sidad; + }; + __u8 reserved188[8]; /* 0x0188 */ + __u64 sdnxo; /* 0x0190 */ + __u8 reserved198[8]; /* 0x0198 */ + __u32 fac; /* 0x01a0 */ + __u8 reserved1a4[20]; /* 0x01a4 */ + __u64 cbrlo; /* 0x01b8 */ + __u8 reserved1c0[8]; /* 0x01c0 */ +#define ECD_HOSTREGMGMT 0x20000000 +#define ECD_MEF 0x08000000 +#define ECD_ETOKENF 0x02000000 +#define ECD_ECC 0x00200000 + __u32 ecd; /* 0x01c8 */ + __u8 reserved1cc[18]; /* 0x01cc */ + __u64 pp; /* 0x01de */ + __u8 reserved1e6[2]; /* 0x01e6 */ + __u64 itdba; /* 0x01e8 */ + __u64 riccbd; /* 0x01f0 */ + __u64 gvrd; /* 0x01f8 */ +} __packed __aligned(512); + +struct kvm_s390_itdb { + __u8 data[256]; +}; + +struct sie_page { + struct kvm_s390_sie_block sie_block; + struct mcck_volatile_info mcck_info; /* 0x0200 */ + __u8 reserved218[360]; /* 0x0218 */ + __u64 pv_grregs[16]; /* 0x0380 */ + __u8 reserved400[512]; /* 0x0400 */ + struct kvm_s390_itdb itdb; /* 0x0600 */ + __u8 reserved700[2304]; /* 0x0700 */ +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 exit_userspace; + u64 exit_null; + u64 exit_external_request; + u64 exit_io_request; + u64 exit_external_interrupt; + u64 exit_stop_request; + u64 exit_validity; + u64 exit_instruction; + u64 exit_pei; + u64 halt_no_poll_steal; + u64 instruction_lctl; + u64 
instruction_lctlg; + u64 instruction_stctl; + u64 instruction_stctg; + u64 exit_program_interruption; + u64 exit_instr_and_program; + u64 exit_operation_exception; + u64 deliver_ckc; + u64 deliver_cputm; + u64 deliver_external_call; + u64 deliver_emergency_signal; + u64 deliver_service_signal; + u64 deliver_virtio; + u64 deliver_stop_signal; + u64 deliver_prefix_signal; + u64 deliver_restart_signal; + u64 deliver_program; + u64 deliver_io; + u64 deliver_machine_check; + u64 exit_wait_state; + u64 inject_ckc; + u64 inject_cputm; + u64 inject_external_call; + u64 inject_emergency_signal; + u64 inject_mchk; + u64 inject_pfault_init; + u64 inject_program; + u64 inject_restart; + u64 inject_set_prefix; + u64 inject_stop_signal; + u64 instruction_epsw; + u64 instruction_gs; + u64 instruction_io_other; + u64 instruction_lpsw; + u64 instruction_lpswe; + u64 instruction_pfmf; + u64 instruction_ptff; + u64 instruction_sck; + u64 instruction_sckpf; + u64 instruction_stidp; + u64 instruction_spx; + u64 instruction_stpx; + u64 instruction_stap; + u64 instruction_iske; + u64 instruction_ri; + u64 instruction_rrbe; + u64 instruction_sske; + u64 instruction_ipte_interlock; + u64 instruction_stsi; + u64 instruction_stfl; + u64 instruction_tb; + u64 instruction_tpi; + u64 instruction_tprot; + u64 instruction_tsch; + u64 instruction_sie; + u64 instruction_essa; + u64 instruction_sthyi; + u64 instruction_sigp_sense; + u64 instruction_sigp_sense_running; + u64 instruction_sigp_external_call; + u64 instruction_sigp_emergency; + u64 instruction_sigp_cond_emergency; + u64 instruction_sigp_start; + u64 instruction_sigp_stop; + u64 instruction_sigp_stop_store_status; + u64 instruction_sigp_store_status; + u64 instruction_sigp_store_adtl_status; + u64 instruction_sigp_arch; + u64 instruction_sigp_prefix; + u64 instruction_sigp_restart; + u64 instruction_sigp_init_cpu_reset; + u64 instruction_sigp_cpu_reset; + u64 instruction_sigp_unknown; + u64 instruction_diagnose_10; + u64 instruction_diagnose_44; + u64 instruction_diagnose_9c; + u64 diag_9c_ignored; + u64 diag_9c_forward; + u64 instruction_diagnose_258; + u64 instruction_diagnose_308; + u64 instruction_diagnose_500; + u64 instruction_diagnose_other; + u64 pfault_sync; +}; + +#define PGM_OPERATION 0x01 +#define PGM_PRIVILEGED_OP 0x02 +#define PGM_EXECUTE 0x03 +#define PGM_PROTECTION 0x04 +#define PGM_ADDRESSING 0x05 +#define PGM_SPECIFICATION 0x06 +#define PGM_DATA 0x07 +#define PGM_FIXED_POINT_OVERFLOW 0x08 +#define PGM_FIXED_POINT_DIVIDE 0x09 +#define PGM_DECIMAL_OVERFLOW 0x0a +#define PGM_DECIMAL_DIVIDE 0x0b +#define PGM_HFP_EXPONENT_OVERFLOW 0x0c +#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d +#define PGM_HFP_SIGNIFICANCE 0x0e +#define PGM_HFP_DIVIDE 0x0f +#define PGM_SEGMENT_TRANSLATION 0x10 +#define PGM_PAGE_TRANSLATION 0x11 +#define PGM_TRANSLATION_SPEC 0x12 +#define PGM_SPECIAL_OPERATION 0x13 +#define PGM_OPERAND 0x15 +#define PGM_TRACE_TABEL 0x16 +#define PGM_VECTOR_PROCESSING 0x1b +#define PGM_SPACE_SWITCH 0x1c +#define PGM_HFP_SQUARE_ROOT 0x1d +#define PGM_PC_TRANSLATION_SPEC 0x1f +#define PGM_AFX_TRANSLATION 0x20 +#define PGM_ASX_TRANSLATION 0x21 +#define PGM_LX_TRANSLATION 0x22 +#define PGM_EX_TRANSLATION 0x23 +#define PGM_PRIMARY_AUTHORITY 0x24 +#define PGM_SECONDARY_AUTHORITY 0x25 +#define PGM_LFX_TRANSLATION 0x26 +#define PGM_LSX_TRANSLATION 0x27 +#define PGM_ALET_SPECIFICATION 0x28 +#define PGM_ALEN_TRANSLATION 0x29 +#define PGM_ALE_SEQUENCE 0x2a +#define PGM_ASTE_VALIDITY 0x2b +#define PGM_ASTE_SEQUENCE 0x2c +#define PGM_EXTENDED_AUTHORITY 0x2d 
+#define PGM_LSTE_SEQUENCE 0x2e +#define PGM_ASTE_INSTANCE 0x2f +#define PGM_STACK_FULL 0x30 +#define PGM_STACK_EMPTY 0x31 +#define PGM_STACK_SPECIFICATION 0x32 +#define PGM_STACK_TYPE 0x33 +#define PGM_STACK_OPERATION 0x34 +#define PGM_ASCE_TYPE 0x38 +#define PGM_REGION_FIRST_TRANS 0x39 +#define PGM_REGION_SECOND_TRANS 0x3a +#define PGM_REGION_THIRD_TRANS 0x3b +#define PGM_MONITOR 0x40 +#define PGM_PER 0x80 +#define PGM_CRYPTO_OPERATION 0x119 + +/* irq types in ascending order of priority */ +enum irq_types { + IRQ_PEND_SET_PREFIX = 0, + IRQ_PEND_RESTART, + IRQ_PEND_SIGP_STOP, + IRQ_PEND_IO_ISC_7, + IRQ_PEND_IO_ISC_6, + IRQ_PEND_IO_ISC_5, + IRQ_PEND_IO_ISC_4, + IRQ_PEND_IO_ISC_3, + IRQ_PEND_IO_ISC_2, + IRQ_PEND_IO_ISC_1, + IRQ_PEND_IO_ISC_0, + IRQ_PEND_VIRTIO, + IRQ_PEND_PFAULT_DONE, + IRQ_PEND_PFAULT_INIT, + IRQ_PEND_EXT_HOST, + IRQ_PEND_EXT_SERVICE, + IRQ_PEND_EXT_SERVICE_EV, + IRQ_PEND_EXT_TIMING, + IRQ_PEND_EXT_CPU_TIMER, + IRQ_PEND_EXT_CLOCK_COMP, + IRQ_PEND_EXT_EXTERNAL, + IRQ_PEND_EXT_EMERGENCY, + IRQ_PEND_EXT_MALFUNC, + IRQ_PEND_EXT_IRQ_KEY, + IRQ_PEND_MCHK_REP, + IRQ_PEND_PROG, + IRQ_PEND_SVC, + IRQ_PEND_MCHK_EX, + IRQ_PEND_COUNT +}; + +/* We have 2M for virtio device descriptor pages. Smallest amount of + * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381 + */ +#define KVM_S390_MAX_VIRTIO_IRQS 87381 + +/* + * Repressible (non-floating) machine check interrupts + * subclass bits in MCIC + */ +#define MCHK_EXTD_BIT 58 +#define MCHK_DEGR_BIT 56 +#define MCHK_WARN_BIT 55 +#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \ + (1UL << MCHK_EXTD_BIT) | \ + (1UL << MCHK_WARN_BIT)) + +/* Exigent machine check interrupts subclass bits in MCIC */ +#define MCHK_SD_BIT 63 +#define MCHK_PD_BIT 62 +#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT)) + +#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \ + (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \ + (1UL << IRQ_PEND_EXT_CPU_TIMER) | \ + (1UL << IRQ_PEND_EXT_MALFUNC) | \ + (1UL << IRQ_PEND_EXT_EMERGENCY) | \ + (1UL << IRQ_PEND_EXT_EXTERNAL) | \ + (1UL << IRQ_PEND_EXT_TIMING) | \ + (1UL << IRQ_PEND_EXT_HOST) | \ + (1UL << IRQ_PEND_EXT_SERVICE) | \ + (1UL << IRQ_PEND_EXT_SERVICE_EV) | \ + (1UL << IRQ_PEND_VIRTIO) | \ + (1UL << IRQ_PEND_PFAULT_INIT) | \ + (1UL << IRQ_PEND_PFAULT_DONE)) + +#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \ + (1UL << IRQ_PEND_IO_ISC_1) | \ + (1UL << IRQ_PEND_IO_ISC_2) | \ + (1UL << IRQ_PEND_IO_ISC_3) | \ + (1UL << IRQ_PEND_IO_ISC_4) | \ + (1UL << IRQ_PEND_IO_ISC_5) | \ + (1UL << IRQ_PEND_IO_ISC_6) | \ + (1UL << IRQ_PEND_IO_ISC_7)) + +#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \ + (1UL << IRQ_PEND_MCHK_EX)) + +#define IRQ_PEND_EXT_II_MASK ((1UL << IRQ_PEND_EXT_CPU_TIMER) | \ + (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \ + (1UL << IRQ_PEND_EXT_EMERGENCY) | \ + (1UL << IRQ_PEND_EXT_EXTERNAL) | \ + (1UL << IRQ_PEND_EXT_SERVICE) | \ + (1UL << IRQ_PEND_EXT_SERVICE_EV)) + +struct kvm_s390_interrupt_info { + struct list_head list; + u64 type; + union { + struct kvm_s390_io_info io; + struct kvm_s390_ext_info ext; + struct kvm_s390_pgm_info pgm; + struct kvm_s390_emerg_info emerg; + struct kvm_s390_extcall_info extcall; + struct kvm_s390_prefix_info prefix; + struct kvm_s390_stop_info stop; + struct kvm_s390_mchk_info mchk; + }; +}; + +struct kvm_s390_irq_payload { + struct kvm_s390_io_info io; + struct kvm_s390_ext_info ext; + struct kvm_s390_pgm_info pgm; + struct kvm_s390_emerg_info emerg; + struct kvm_s390_extcall_info extcall; + struct kvm_s390_prefix_info prefix; + struct 
kvm_s390_stop_info stop; + struct kvm_s390_mchk_info mchk; +}; + +struct kvm_s390_local_interrupt { + spinlock_t lock; + DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); + struct kvm_s390_irq_payload irq; + unsigned long pending_irqs; +}; + +#define FIRQ_LIST_IO_ISC_0 0 +#define FIRQ_LIST_IO_ISC_1 1 +#define FIRQ_LIST_IO_ISC_2 2 +#define FIRQ_LIST_IO_ISC_3 3 +#define FIRQ_LIST_IO_ISC_4 4 +#define FIRQ_LIST_IO_ISC_5 5 +#define FIRQ_LIST_IO_ISC_6 6 +#define FIRQ_LIST_IO_ISC_7 7 +#define FIRQ_LIST_PFAULT 8 +#define FIRQ_LIST_VIRTIO 9 +#define FIRQ_LIST_COUNT 10 +#define FIRQ_CNTR_IO 0 +#define FIRQ_CNTR_SERVICE 1 +#define FIRQ_CNTR_VIRTIO 2 +#define FIRQ_CNTR_PFAULT 3 +#define FIRQ_MAX_COUNT 4 + +/* mask the AIS mode for a given ISC */ +#define AIS_MODE_MASK(isc) (0x80 >> isc) + +#define KVM_S390_AIS_MODE_ALL 0 +#define KVM_S390_AIS_MODE_SINGLE 1 + +struct kvm_s390_float_interrupt { + unsigned long pending_irqs; + unsigned long masked_irqs; + spinlock_t lock; + struct list_head lists[FIRQ_LIST_COUNT]; + int counters[FIRQ_MAX_COUNT]; + struct kvm_s390_mchk_info mchk; + struct kvm_s390_ext_info srv_signal; + int next_rr_cpu; + struct mutex ais_lock; + u8 simm; + u8 nimm; +}; + +struct kvm_hw_wp_info_arch { + unsigned long addr; + unsigned long phys_addr; + int len; + char *old_data; +}; + +struct kvm_hw_bp_info_arch { + unsigned long addr; + int len; +}; + +/* + * Only the upper 16 bits of kvm_guest_debug->control are arch specific. + * Further KVM_GUESTDBG flags which can be used from userspace can be found in + * arch/s390/include/uapi/asm/kvm.h + */ +#define KVM_GUESTDBG_EXIT_PENDING 0x10000000 + +#define guestdbg_enabled(vcpu) \ + (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) +#define guestdbg_sstep_enabled(vcpu) \ + (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) +#define guestdbg_hw_bp_enabled(vcpu) \ + (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) +#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \ + (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING)) + +#define KVM_GUESTDBG_VALID_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |\ + KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_EXIT_PENDING) + +struct kvm_guestdbg_info_arch { + unsigned long cr0; + unsigned long cr9; + unsigned long cr10; + unsigned long cr11; + struct kvm_hw_bp_info_arch *hw_bp_info; + struct kvm_hw_wp_info_arch *hw_wp_info; + int nr_hw_bp; + int nr_hw_wp; + unsigned long last_bp; +}; + +struct kvm_s390_pv_vcpu { + u64 handle; + unsigned long stor_base; +}; + +struct kvm_vcpu_arch { + struct kvm_s390_sie_block *sie_block; + /* if vsie is active, currently executed shadow sie control block */ + struct kvm_s390_sie_block *vsie_block; + unsigned int host_acrs[NUM_ACRS]; + struct gs_cb *host_gscb; + struct fpu host_fpregs; + struct kvm_s390_local_interrupt local_int; + struct hrtimer ckc_timer; + struct kvm_s390_pgm_info pgm; + struct gmap *gmap; + /* backup location for the currently enabled gmap when scheduled out */ + struct gmap *enabled_gmap; + struct kvm_guestdbg_info_arch guestdbg; + unsigned long pfault_token; + unsigned long pfault_select; + unsigned long pfault_compare; + bool cputm_enabled; + /* + * The seqcount protects updates to cputm_start and sie_block.cputm, + * this way we can have non-blocking reads with consistent values. + * Only the owning VCPU thread (vcpu->cpu) is allowed to change these + * values and to start/stop/enable/disable cpu timer accounting.
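+ * Readers use the usual seqcount retry pattern, for illustration: + * do { + * seq = read_seqcount_begin(&vcpu->arch.cputm_seqcount); + * value = ...; + * } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq));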
+ */ + seqcount_t cputm_seqcount; + __u64 cputm_start; + bool gs_enabled; + bool skey_enabled; + struct kvm_s390_pv_vcpu pv; + union diag318_info diag318_info; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; + u64 inject_io; + u64 inject_float_mchk; + u64 inject_pfault_done; + u64 inject_service_signal; + u64 inject_virtio; + u64 aen_forward; +}; + +struct kvm_arch_memory_slot { +}; + +struct s390_map_info { + struct list_head list; + __u64 guest_addr; + __u64 addr; + struct page *page; +}; + +struct s390_io_adapter { + unsigned int id; + int isc; + bool maskable; + bool masked; + bool swap; + bool suppressible; +}; + +#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8) +#define MAX_S390_ADAPTER_MAPS 256 + +/* maximum size of facilities and facility mask is 2k bytes */ +#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11) +#define S390_ARCH_FAC_LIST_SIZE_U64 \ + (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64)) +#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE +#define S390_ARCH_FAC_MASK_SIZE_U64 \ + (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) + +struct kvm_s390_cpu_model { + /* facility mask supported by kvm & hosting machine */ + __u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64]; + struct kvm_s390_vm_cpu_subfunc subfuncs; + /* facility list requested by guest (in dma page) */ + __u64 *fac_list; + u64 cpuid; + unsigned short ibc; + /* subset of available UV-features for pv-guests enabled by user space */ + struct kvm_s390_vm_cpu_uv_feat uv_feat_guest; +}; + +typedef int (*crypto_hook)(struct kvm_vcpu *vcpu); + +struct kvm_s390_crypto { + struct kvm_s390_crypto_cb *crycb; + struct rw_semaphore pqap_hook_rwsem; + crypto_hook *pqap_hook; + __u32 crycbd; + __u8 aes_kw; + __u8 dea_kw; + __u8 apie; +}; + +#define APCB0_MASK_SIZE 1 +struct kvm_s390_apcb0 { + __u64 apm[APCB0_MASK_SIZE]; /* 0x0000 */ + __u64 aqm[APCB0_MASK_SIZE]; /* 0x0008 */ + __u64 adm[APCB0_MASK_SIZE]; /* 0x0010 */ + __u64 reserved18; /* 0x0018 */ +}; + +#define APCB1_MASK_SIZE 4 +struct kvm_s390_apcb1 { + __u64 apm[APCB1_MASK_SIZE]; /* 0x0000 */ + __u64 aqm[APCB1_MASK_SIZE]; /* 0x0020 */ + __u64 adm[APCB1_MASK_SIZE]; /* 0x0040 */ + __u64 reserved60[4]; /* 0x0060 */ +}; + +struct kvm_s390_crypto_cb { + struct kvm_s390_apcb0 apcb0; /* 0x0000 */ + __u8 reserved20[0x0048 - 0x0020]; /* 0x0020 */ + __u8 dea_wrapping_key_mask[24]; /* 0x0048 */ + __u8 aes_wrapping_key_mask[32]; /* 0x0060 */ + struct kvm_s390_apcb1 apcb1; /* 0x0080 */ +}; + +struct kvm_s390_gisa { + union { + struct { /* common to all formats */ + u32 next_alert; + u8 ipm; + u8 reserved01[2]; + u8 iam; + }; + struct { /* format 0 */ + u32 next_alert; + u8 ipm; + u8 reserved01; + u8 : 6; + u8 g : 1; + u8 c : 1; + u8 iam; + u8 reserved02[4]; + u32 airq_count; + } g0; + struct { /* format 1 */ + u32 next_alert; + u8 ipm; + u8 simm; + u8 nimm; + u8 iam; + u8 aism[8]; + u8 : 6; + u8 g : 1; + u8 c : 1; + u8 reserved03[11]; + u32 airq_count; + } g1; + struct { + u64 word[4]; + } u64; + }; +}; + +struct kvm_s390_gib { + u32 alert_list_origin; + u32 reserved01; + u8:5; + u8 nisc:3; + u8 reserved03[3]; + u32 reserved04[5]; +}; + +/* + * sie_page2 has to be allocated as DMA because fac_list, crycb and + * gisa need 31bit addresses in the sie control block. 
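+ * Allocating the page with GFP_DMA keeps it below 2 GB, so 31-bit origin + * fields such as crycbd and gd can reference it.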
+ */ +struct sie_page2 { + __u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */ + struct kvm_s390_crypto_cb crycb; /* 0x0800 */ + struct kvm_s390_gisa gisa; /* 0x0900 */ + struct kvm *kvm; /* 0x0920 */ + u8 reserved928[0x1000 - 0x928]; /* 0x0928 */ +}; + +struct kvm_s390_vsie { + struct mutex mutex; + struct radix_tree_root addr_to_page; + int page_count; + int next; + struct page *pages[KVM_MAX_VCPUS]; +}; + +struct kvm_s390_gisa_iam { + u8 mask; + spinlock_t ref_lock; + u32 ref_count[MAX_ISC + 1]; +}; + +struct kvm_s390_gisa_interrupt { + struct kvm_s390_gisa *origin; + struct kvm_s390_gisa_iam alert; + struct hrtimer timer; + u64 expires; + DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS); +}; + +struct kvm_s390_pv { + u64 handle; + u64 guest_len; + unsigned long stor_base; + void *stor_var; + bool dumping; + void *set_aside; + struct list_head need_cleanup; + struct mmu_notifier mmu_notifier; +}; + +struct kvm_arch{ + void *sca; + int use_esca; + rwlock_t sca_lock; + debug_info_t *dbf; + struct kvm_s390_float_interrupt float_int; + struct kvm_device *flic; + struct gmap *gmap; + unsigned long mem_limit; + int css_support; + int use_irqchip; + int use_cmma; + int use_pfmfi; + int use_skf; + int use_zpci_interp; + int user_cpu_state_ctrl; + int user_sigp; + int user_stsi; + int user_instr0; + struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; + wait_queue_head_t ipte_wq; + int ipte_lock_count; + struct mutex ipte_mutex; + spinlock_t start_stop_lock; + struct sie_page2 *sie_page2; + struct kvm_s390_cpu_model model; + struct kvm_s390_crypto crypto; + struct kvm_s390_vsie vsie; + u8 epdx; + u64 epoch; + int migration_mode; + atomic64_t cmma_dirty_pages; + /* subset of available cpu features enabled by user space */ + DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); + /* indexed by vcpu_idx */ + DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS); + struct kvm_s390_gisa_interrupt gisa_int; + struct kvm_s390_pv pv; + struct list_head kzdev_list; + spinlock_t kzdev_list_lock; +}; + +#define KVM_HVA_ERR_BAD (-1UL) +#define KVM_HVA_ERR_RO_BAD (-2UL) + +static inline bool kvm_is_error_hva(unsigned long addr) +{ + return IS_ERR_VALUE(addr); +} + +#define ASYNC_PF_PER_VCPU 64 +struct kvm_arch_async_pf { + unsigned long pfault_token; +}; + +bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu); + +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); + +bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); + +void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); + +static inline void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) {} + +void kvm_arch_crypto_clear_masks(struct kvm *kvm); +void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, + unsigned long *aqm, unsigned long *adm); + +int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa); + +static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa) +{ + return __sie64a(virt_to_phys(sie_block), sie_block, rsa); +} + +extern char sie_exit; + +bool kvm_s390_pv_is_protected(struct kvm *kvm); +bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu); + +extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); +extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); + +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + 
struct kvm_memory_slot *slot) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} +static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} + +#define __KVM_HAVE_ARCH_VM_FREE +void kvm_arch_free_vm(struct kvm *kvm); + +struct zpci_kvm_hook { + int (*kvm_register)(void *opaque, struct kvm *kvm); + void (*kvm_unregister)(void *opaque); +}; + +extern struct zpci_kvm_hook zpci_kvm_hook; + +#endif diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h new file mode 100644 index 000000000..df73a0527 --- /dev/null +++ b/arch/s390/include/asm/kvm_para.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * definition for paravirtual devices on s390 + * + * Copyright IBM Corp. 2008 + * + * Author(s): Christian Borntraeger + */ +/* + * Hypercalls for KVM on s390. The calling convention is similar to the + * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1 + * as hypercall number and R7 as parameter 6. The return value is + * written to R2. We use the diagnose instruction as hypercall. To avoid + * conflicts with existing diagnoses for LPAR and z/VM, we do not use + * the instruction encoded number, but specify the number in R1 and + * use 0x500 as KVM hypercall + * + * Copyright IBM Corp. 2007,2008 + * Author(s): Christian Borntraeger + */ +#ifndef __S390_KVM_PARA_H +#define __S390_KVM_PARA_H + +#include +#include + +#define HYPERCALL_FMT_0 +#define HYPERCALL_FMT_1 , "0" (r2) +#define HYPERCALL_FMT_2 , "d" (r3) HYPERCALL_FMT_1 +#define HYPERCALL_FMT_3 , "d" (r4) HYPERCALL_FMT_2 +#define HYPERCALL_FMT_4 , "d" (r5) HYPERCALL_FMT_3 +#define HYPERCALL_FMT_5 , "d" (r6) HYPERCALL_FMT_4 +#define HYPERCALL_FMT_6 , "d" (r7) HYPERCALL_FMT_5 + +#define HYPERCALL_PARM_0 +#define HYPERCALL_PARM_1 , unsigned long arg1 +#define HYPERCALL_PARM_2 HYPERCALL_PARM_1, unsigned long arg2 +#define HYPERCALL_PARM_3 HYPERCALL_PARM_2, unsigned long arg3 +#define HYPERCALL_PARM_4 HYPERCALL_PARM_3, unsigned long arg4 +#define HYPERCALL_PARM_5 HYPERCALL_PARM_4, unsigned long arg5 +#define HYPERCALL_PARM_6 HYPERCALL_PARM_5, unsigned long arg6 + +#define HYPERCALL_REGS_0 +#define HYPERCALL_REGS_1 \ + register unsigned long r2 asm("2") = arg1 +#define HYPERCALL_REGS_2 \ + HYPERCALL_REGS_1; \ + register unsigned long r3 asm("3") = arg2 +#define HYPERCALL_REGS_3 \ + HYPERCALL_REGS_2; \ + register unsigned long r4 asm("4") = arg3 +#define HYPERCALL_REGS_4 \ + HYPERCALL_REGS_3; \ + register unsigned long r5 asm("5") = arg4 +#define HYPERCALL_REGS_5 \ + HYPERCALL_REGS_4; \ + register unsigned long r6 asm("6") = arg5 +#define HYPERCALL_REGS_6 \ + HYPERCALL_REGS_5; \ + register unsigned long r7 asm("7") = arg6 + +#define HYPERCALL_ARGS_0 +#define HYPERCALL_ARGS_1 , arg1 +#define HYPERCALL_ARGS_2 HYPERCALL_ARGS_1, arg2 +#define HYPERCALL_ARGS_3 HYPERCALL_ARGS_2, arg3 +#define HYPERCALL_ARGS_4 HYPERCALL_ARGS_3, arg4 +#define HYPERCALL_ARGS_5 HYPERCALL_ARGS_4, arg5 +#define HYPERCALL_ARGS_6 HYPERCALL_ARGS_5, arg6 + +#define GENERATE_KVM_HYPERCALL_FUNC(args) \ +static inline \ +long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \ +{ \ + register unsigned long __nr asm("1") = nr; \ + register long __rc asm("2"); \ + HYPERCALL_REGS_##args; \ + \ + asm volatile ( \ + " diag 2,4,0x500\n" \ + : "=d" 
(__rc) \ + : "d" (__nr) HYPERCALL_FMT_##args \ + : "memory", "cc"); \ + return __rc; \ +} \ + \ +static inline \ +long kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \ +{ \ + diag_stat_inc(DIAG_STAT_X500); \ + return __kvm_hypercall##args(nr HYPERCALL_ARGS_##args); \ +} + +GENERATE_KVM_HYPERCALL_FUNC(0) +GENERATE_KVM_HYPERCALL_FUNC(1) +GENERATE_KVM_HYPERCALL_FUNC(2) +GENERATE_KVM_HYPERCALL_FUNC(3) +GENERATE_KVM_HYPERCALL_FUNC(4) +GENERATE_KVM_HYPERCALL_FUNC(5) +GENERATE_KVM_HYPERCALL_FUNC(6) + +/* kvm on s390 is always paravirtualization enabled */ +static inline int kvm_para_available(void) +{ + return 1; +} + +/* No feature bits are currently assigned for kvm on s390 */ +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} + +#endif /* __S390_KVM_PARA_H */ diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h new file mode 100644 index 000000000..df3fb7d82 --- /dev/null +++ b/arch/s390/include/asm/linkage.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#include + +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07 +#define __ALIGN_STR __stringify(__ALIGN) + +#endif diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h new file mode 100644 index 000000000..69ccc464a --- /dev/null +++ b/arch/s390/include/asm/lowcore.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2012 + * Author(s): Hartmut Penner , + * Martin Schwidefsky , + * Denis Joseph Barrow, + */ + +#ifndef _ASM_S390_LOWCORE_H +#define _ASM_S390_LOWCORE_H + +#include +#include +#include +#include + +#define LC_ORDER 1 +#define LC_PAGES 2 + +struct pgm_tdb { + u64 data[32]; +}; + +struct lowcore { + __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ + __u32 ipl_parmblock_ptr; /* 0x0014 */ + __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ + __u32 ext_params; /* 0x0080 */ + union { + struct { + __u16 ext_cpu_addr; /* 0x0084 */ + __u16 ext_int_code; /* 0x0086 */ + }; + __u32 ext_int_code_addr; + }; + __u32 svc_int_code; /* 0x0088 */ + union { + struct { + __u16 pgm_ilc; /* 0x008c */ + __u16 pgm_code; /* 0x008e */ + }; + __u32 pgm_int_code; + }; + __u32 data_exc_code; /* 0x0090 */ + __u16 mon_class_num; /* 0x0094 */ + union { + struct { + __u8 per_code; /* 0x0096 */ + __u8 per_atmid; /* 0x0097 */ + }; + __u16 per_code_combined; + }; + __u64 per_address; /* 0x0098 */ + __u8 exc_access_id; /* 0x00a0 */ + __u8 per_access_id; /* 0x00a1 */ + __u8 op_access_id; /* 0x00a2 */ + __u8 ar_mode_id; /* 0x00a3 */ + __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */ + __u64 trans_exc_code; /* 0x00a8 */ + __u64 monitor_code; /* 0x00b0 */ + union { + struct { + __u16 subchannel_id; /* 0x00b8 */ + __u16 subchannel_nr; /* 0x00ba */ + __u32 io_int_parm; /* 0x00bc */ + __u32 io_int_word; /* 0x00c0 */ + }; + struct tpi_info tpi_info; /* 0x00b8 */ + }; + __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ + __u32 stfl_fac_list; /* 0x00c8 */ + __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */ + __u64 mcck_interruption_code; /* 0x00e8 */ + __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ + __u32 external_damage_code; /* 0x00f4 */ + __u64 failing_storage_address; /* 0x00f8 */ + __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */ + __u64 pgm_last_break; /* 0x0110 */ + __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */ + psw_t restart_old_psw; /* 0x0120 */ + 
psw_t external_old_psw; /* 0x0130 */ + psw_t svc_old_psw; /* 0x0140 */ + psw_t program_old_psw; /* 0x0150 */ + psw_t mcck_old_psw; /* 0x0160 */ + psw_t io_old_psw; /* 0x0170 */ + __u8 pad_0x0180[0x01a0-0x0180]; /* 0x0180 */ + psw_t restart_psw; /* 0x01a0 */ + psw_t external_new_psw; /* 0x01b0 */ + psw_t svc_new_psw; /* 0x01c0 */ + psw_t program_new_psw; /* 0x01d0 */ + psw_t mcck_new_psw; /* 0x01e0 */ + psw_t io_new_psw; /* 0x01f0 */ + + /* Save areas. */ + __u64 save_area_sync[8]; /* 0x0200 */ + __u64 save_area_async[8]; /* 0x0240 */ + __u64 save_area_restart[1]; /* 0x0280 */ + + /* CPU flags. */ + __u64 cpu_flags; /* 0x0288 */ + + /* Return psws. */ + psw_t return_psw; /* 0x0290 */ + psw_t return_mcck_psw; /* 0x02a0 */ + + __u64 last_break; /* 0x02b0 */ + + /* CPU accounting and timing values. */ + __u64 sys_enter_timer; /* 0x02b8 */ + __u64 mcck_enter_timer; /* 0x02c0 */ + __u64 exit_timer; /* 0x02c8 */ + __u64 user_timer; /* 0x02d0 */ + __u64 guest_timer; /* 0x02d8 */ + __u64 system_timer; /* 0x02e0 */ + __u64 hardirq_timer; /* 0x02e8 */ + __u64 softirq_timer; /* 0x02f0 */ + __u64 steal_timer; /* 0x02f8 */ + __u64 avg_steal_timer; /* 0x0300 */ + __u64 last_update_timer; /* 0x0308 */ + __u64 last_update_clock; /* 0x0310 */ + __u64 int_clock; /* 0x0318 */ + __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */ + __u64 clock_comparator; /* 0x0328 */ + __u64 boot_clock[2]; /* 0x0330 */ + + /* Current process. */ + __u64 current_task; /* 0x0340 */ + __u64 kernel_stack; /* 0x0348 */ + + /* Interrupt, DAT-off and restart stack. */ + __u64 async_stack; /* 0x0350 */ + __u64 nodat_stack; /* 0x0358 */ + __u64 restart_stack; /* 0x0360 */ + __u64 mcck_stack; /* 0x0368 */ + /* Restart function and parameter. */ + __u64 restart_fn; /* 0x0370 */ + __u64 restart_data; /* 0x0378 */ + __u32 restart_source; /* 0x0380 */ + __u32 restart_flags; /* 0x0384 */ + + /* Address space pointer. */ + __u64 kernel_asce; /* 0x0388 */ + __u64 user_asce; /* 0x0390 */ + + /* + * The lpp and current_pid fields form a + * 64-bit value that is set as program + * parameter with the LPP instruction. + */ + __u32 lpp; /* 0x0398 */ + __u32 current_pid; /* 0x039c */ + + /* SMP info area */ + __u32 cpu_nr; /* 0x03a0 */ + __u32 softirq_pending; /* 0x03a4 */ + __s32 preempt_count; /* 0x03a8 */ + __u32 spinlock_lockval; /* 0x03ac */ + __u32 spinlock_index; /* 0x03b0 */ + __u32 fpu_flags; /* 0x03b4 */ + __u64 percpu_offset; /* 0x03b8 */ + __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */ + __u64 machine_flags; /* 0x03c8 */ + __u64 gmap; /* 0x03d0 */ + __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ + + __u32 return_lpswe; /* 0x0400 */ + __u32 return_mcck_lpswe; /* 0x0404 */ + __u8 pad_0x040a[0x0e00-0x0408]; /* 0x0408 */ + + /* + * 0xe00 contains the address of the IPL Parameter Information + * block. Dump tools need IPIB for IPL after dump.
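+ * The checksum stored at 0x0e08 (ipib_checksum below) lets dump tools
+ * verify that the saved block is intact.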
+ * Note: do not change the position of any fields in 0x0e00-0x0f00 + */ + __u64 ipib; /* 0x0e00 */ + __u32 ipib_checksum; /* 0x0e08 */ + __u64 vmcore_info; /* 0x0e0c */ + __u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */ + __u64 os_info; /* 0x0e18 */ + __u8 pad_0x0e20[0x11b0-0x0e20]; /* 0x0e20 */ + + /* Pointer to the machine check extended save area */ + __u64 mcesad; /* 0x11b0 */ + + /* 64 bit extparam used for pfault/diag 250: defined by architecture */ + __u64 ext_params2; /* 0x11B8 */ + __u8 pad_0x11c0[0x1200-0x11C0]; /* 0x11C0 */ + + /* CPU register save area: defined by architecture */ + __u64 floating_pt_save_area[16]; /* 0x1200 */ + __u64 gpregs_save_area[16]; /* 0x1280 */ + psw_t psw_save_area; /* 0x1300 */ + __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ + __u32 prefixreg_save_area; /* 0x1318 */ + __u32 fpt_creg_save_area; /* 0x131c */ + __u8 pad_0x1320[0x1324-0x1320]; /* 0x1320 */ + __u32 tod_progreg_save_area; /* 0x1324 */ + __u32 cpu_timer_save_area[2]; /* 0x1328 */ + __u32 clock_comp_save_area[2]; /* 0x1330 */ + __u64 last_break_save_area; /* 0x1338 */ + __u32 access_regs_save_area[16]; /* 0x1340 */ + __u64 cregs_save_area[16]; /* 0x1380 */ + __u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */ + /* Cryptography-counter designation */ + __u64 ccd; /* 0x1500 */ + /* AI-extension counter designation */ + __u64 aicd; /* 0x1508 */ + __u8 pad_0x1510[0x1800-0x1510]; /* 0x1510 */ + + /* Transaction abort diagnostic block */ + struct pgm_tdb pgm_tdb; /* 0x1800 */ + __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ +} __packed __aligned(8192); + +#define S390_lowcore (*((struct lowcore *) 0)) + +extern struct lowcore *lowcore_ptr[]; + +static inline void set_prefix(__u32 address) +{ + asm volatile("spx %0" : : "Q" (address) : "memory"); +} + +static inline __u32 store_prefix(void) +{ + __u32 address; + + asm volatile("stpx %0" : "=Q" (address)); + return address; +} + +#endif /* _ASM_S390_LOWCORE_H */ diff --git a/arch/s390/include/asm/maccess.h b/arch/s390/include/asm/maccess.h new file mode 100644 index 000000000..50225940d --- /dev/null +++ b/arch/s390/include/asm/maccess.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_S390_MACCESS_H +#define __ASM_S390_MACCESS_H + +#include + +#define MEMCPY_REAL_SIZE PAGE_SIZE +#define MEMCPY_REAL_MASK PAGE_MASK + +struct iov_iter; + +extern unsigned long __memcpy_real_area; +extern pte_t *memcpy_real_ptep; +size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count); +int memcpy_real(void *dest, unsigned long src, size_t count); +#ifdef CONFIG_CRASH_DUMP +int copy_oldmem_kernel(void *dst, unsigned long src, size_t count); +#endif + +#endif /* __ASM_S390_MACCESS_H */ diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h new file mode 100644 index 000000000..b85e13505 --- /dev/null +++ b/arch/s390/include/asm/mem_encrypt.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_MEM_ENCRYPT_H__ +#define S390_MEM_ENCRYPT_H__ + +#ifndef __ASSEMBLY__ + +int set_memory_encrypted(unsigned long vaddr, int numpages); +int set_memory_decrypted(unsigned long vaddr, int numpages); + +#endif /* __ASSEMBLY__ */ + +#endif /* S390_MEM_ENCRYPT_H__ */ diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h new file mode 100644 index 000000000..829d68e2c --- /dev/null +++ b/arch/s390/include/asm/mmu.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MMU_H +#define __MMU_H + +#include +#include +#include + +typedef struct { + spinlock_t lock; + cpumask_t 
cpu_attach_mask; + atomic_t flush_count; + unsigned int flush_mm; + struct list_head pgtable_list; + struct list_head gmap_list; + unsigned long gmap_asce; + unsigned long asce; + unsigned long asce_limit; + unsigned long vdso_base; + /* The mmu context belongs to a secure guest. */ + atomic_t protected_count; + /* + * The following bitfields need a down_write on the mm + * semaphore when they are written to. As they are only + * written once, they can be read without a lock. + * + * The mmu context allocates 4K page tables. + */ + unsigned int alloc_pgste:1; + /* The mmu context uses extended page tables. */ + unsigned int has_pgste:1; + /* The mmu context uses storage keys. */ + unsigned int uses_skeys:1; + /* The mmu context uses CMM. */ + unsigned int uses_cmm:1; + /* The gmaps associated with this context are allowed to use huge pages. */ + unsigned int allow_gmap_hpage_1m:1; +} mm_context_t; + +#define INIT_MM_CONTEXT(name) \ + .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \ + .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ + .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), + +#endif diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h new file mode 100644 index 000000000..2a38af5a0 --- /dev/null +++ b/arch/s390/include/asm/mmu_context.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * + * Derived from "include/asm-i386/mmu_context.h" + */ + +#ifndef __S390_MMU_CONTEXT_H +#define __S390_MMU_CONTEXT_H + +#include +#include +#include +#include +#include +#include + +#define init_new_context init_new_context +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + unsigned long asce_type, init_entry; + + spin_lock_init(&mm->context.lock); + INIT_LIST_HEAD(&mm->context.pgtable_list); + INIT_LIST_HEAD(&mm->context.gmap_list); + cpumask_clear(&mm->context.cpu_attach_mask); + atomic_set(&mm->context.flush_count, 0); + atomic_set(&mm->context.protected_count, 0); + mm->context.gmap_asce = 0; + mm->context.flush_mm = 0; +#ifdef CONFIG_PGSTE + mm->context.alloc_pgste = page_table_allocate_pgste || + test_thread_flag(TIF_PGSTE) || + (current->mm && current->mm->context.alloc_pgste); + mm->context.has_pgste = 0; + mm->context.uses_skeys = 0; + mm->context.uses_cmm = 0; + mm->context.allow_gmap_hpage_1m = 0; +#endif + switch (mm->context.asce_limit) { + default: + /* + * context created by exec, the value of asce_limit can + * only be zero in this case + */ + VM_BUG_ON(mm->context.asce_limit); + /* continue as 3-level task */ + mm->context.asce_limit = _REGION2_SIZE; + fallthrough; + case _REGION2_SIZE: + /* forked 3-level task */ + init_entry = _REGION3_ENTRY_EMPTY; + asce_type = _ASCE_TYPE_REGION3; + break; + case TASK_SIZE_MAX: + /* forked 5-level task */ + init_entry = _REGION1_ENTRY_EMPTY; + asce_type = _ASCE_TYPE_REGION1; + break; + case _REGION1_SIZE: + /* forked 4-level task */ + init_entry = _REGION2_ENTRY_EMPTY; + asce_type = _ASCE_TYPE_REGION2; + break; + } + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | asce_type; + crst_table_init((unsigned long *) mm->pgd, init_entry); + return 0; +} + +static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + int cpu = smp_processor_id(); + + if (next == &init_mm) + S390_lowcore.user_asce = s390_invalid_asce; + else + S390_lowcore.user_asce = next->context.asce; + cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 
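+	/*
+	 * Only the lowcore copy of the ASCE is updated here; CR7 is
+	 * pointed at the invalid ASCE below and the real user ASCE is
+	 * loaded in finish_arch_post_lock_switch(), once pending TLB
+	 * flushes for the mm have completed.
+	 */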
+ /* Clear previous user-ASCE from CR7 */ + __ctl_load(s390_invalid_asce, 7, 7); + if (prev != next) + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); +} +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} + +#define finish_arch_post_lock_switch finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + if (mm) { + preempt_disable(); + while (atomic_read(&mm->context.flush_count)) + cpu_relax(); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + __tlb_flush_mm_lazy(mm); + preempt_enable(); + } + __ctl_load(S390_lowcore.user_asce, 7, 7); +} + +#define activate_mm activate_mm +static inline void activate_mm(struct mm_struct *prev, + struct mm_struct *next) +{ + switch_mm(prev, next, current); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + __ctl_load(S390_lowcore.user_asce, 7, 7); +} + +#include + +#endif /* __S390_MMU_CONTEXT_H */ diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h new file mode 100644 index 000000000..73e3e7c69 --- /dev/null +++ b/arch/s390/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NUMA support for s390 + * + * Copyright IBM Corp. 2015 + */ + +#ifndef _ASM_S390_MMZONE_H +#define _ASM_S390_MMZONE_H + +#ifdef CONFIG_NUMA + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) + +#endif /* CONFIG_NUMA */ +#endif /* _ASM_S390_MMZONE_H */ diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h new file mode 100644 index 000000000..9f1eea158 --- /dev/null +++ b/arch/s390/include/asm/module.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_MODULE_H +#define _ASM_S390_MODULE_H + +#include + +/* + * This file contains the s390 architecture specific module code. + */ + +struct mod_arch_syminfo { + unsigned long got_offset; + unsigned long plt_offset; + int got_initialized; + int plt_initialized; +}; + +struct mod_arch_specific { + /* Starting offset of got in the module core memory. */ + unsigned long got_offset; + /* Starting offset of plt in the module core memory. */ + unsigned long plt_offset; + /* Size of the got. */ + unsigned long got_size; + /* Size of the plt. */ + unsigned long plt_size; + /* Number of symbols in syminfo. */ + int nsyms; + /* Additional symbol information (got and plt offsets). */ + struct mod_arch_syminfo *syminfo; +#ifdef CONFIG_FUNCTION_TRACER + /* Start of memory reserved for ftrace hotpatch trampolines. */ + struct ftrace_hotpatch_trampoline *trampolines_start; + /* End of memory reserved for ftrace hotpatch trampolines. */ + struct ftrace_hotpatch_trampoline *trampolines_end; + /* Next unused ftrace hotpatch trampoline slot. */ + struct ftrace_hotpatch_trampoline *next_trampoline; +#endif /* CONFIG_FUNCTION_TRACER */ +}; + +#endif /* _ASM_S390_MODULE_H */ diff --git a/arch/s390/include/asm/msi.h b/arch/s390/include/asm/msi.h new file mode 100644 index 000000000..399343ed9 --- /dev/null +++ b/arch/s390/include/asm/msi.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_MSI_H +#define _ASM_S390_MSI_H +#include + +/* + * Work around S390 not using irq_domain at all so we can't set + * IRQ_DOMAIN_FLAG_ISOLATED_MSI. 
See the following for an explanation of how it works: + * + * https://lore.kernel.org/r/31af8174-35e9-ebeb-b9ef-74c90d4bfd93@linux.ibm.com/ + * + * Note this is less isolated than the ARM/x86 versions as userspace can trigger + * MSIs belonging to kernel devices within the same gisa. + */ +#define arch_is_isolated_msi() true + +#endif diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h new file mode 100644 index 000000000..227466ce9 --- /dev/null +++ b/arch/s390/include/asm/nmi.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Machine check handler definitions + * + * Copyright IBM Corp. 2000, 2009 + * Author(s): Ingo Adlung , + * Martin Schwidefsky , + * Cornelia Huck , + */ + +#ifndef _ASM_S390_NMI_H +#define _ASM_S390_NMI_H + +#include +#include + +#define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \ + 1ULL<<59 | 1ULL<<58 | 1ULL<<56 | \ + 1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \ + 1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \ + 1ULL<<45 | 1ULL<<44) +#define MCCK_CODE_SYSTEM_DAMAGE BIT(63) +#define MCCK_CODE_EXT_DAMAGE BIT(63 - 5) +#define MCCK_CODE_CP BIT(63 - 9) +#define MCCK_CODE_STG_ERROR BIT(63 - 16) +#define MCCK_CODE_STG_KEY_ERROR BIT(63 - 18) +#define MCCK_CODE_STG_DEGRAD BIT(63 - 19) +#define MCCK_CODE_PSW_MWP_VALID BIT(63 - 20) +#define MCCK_CODE_PSW_IA_VALID BIT(63 - 23) +#define MCCK_CODE_STG_FAIL_ADDR BIT(63 - 24) +#define MCCK_CODE_CR_VALID BIT(63 - 29) +#define MCCK_CODE_GS_VALID BIT(63 - 36) +#define MCCK_CODE_FC_VALID BIT(63 - 43) +#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46) + +#ifndef __ASSEMBLY__ + +union mci { + unsigned long val; + struct { + u64 sd : 1; /* 00 system damage */ + u64 pd : 1; /* 01 instruction-processing damage */ + u64 sr : 1; /* 02 system recovery */ + u64 : 1; /* 03 */ + u64 cd : 1; /* 04 timing-facility damage */ + u64 ed : 1; /* 05 external damage */ + u64 : 1; /* 06 */ + u64 dg : 1; /* 07 degradation */ + u64 w : 1; /* 08 warning pending */ + u64 cp : 1; /* 09 channel-report pending */ + u64 sp : 1; /* 10 service-processor damage */ + u64 ck : 1; /* 11 channel-subsystem damage */ + u64 : 2; /* 12-13 */ + u64 b : 1; /* 14 backed up */ + u64 : 1; /* 15 */ + u64 se : 1; /* 16 storage error uncorrected */ + u64 sc : 1; /* 17 storage error corrected */ + u64 ke : 1; /* 18 storage-key error uncorrected */ + u64 ds : 1; /* 19 storage degradation */ + u64 wp : 1; /* 20 psw mwp validity */ + u64 ms : 1; /* 21 psw mask and key validity */ + u64 pm : 1; /* 22 psw program mask and cc validity */ + u64 ia : 1; /* 23 psw instruction address validity */ + u64 fa : 1; /* 24 failing storage address validity */ + u64 vr : 1; /* 25 vector register validity */ + u64 ec : 1; /* 26 external damage code validity */ + u64 fp : 1; /* 27 floating point register validity */ + u64 gr : 1; /* 28 general register validity */ + u64 cr : 1; /* 29 control register validity */ + u64 : 1; /* 30 */ + u64 st : 1; /* 31 storage logical validity */ + u64 ie : 1; /* 32 indirect storage error */ + u64 ar : 1; /* 33 access register validity */ + u64 da : 1; /* 34 delayed access exception */ + u64 : 1; /* 35 */ + u64 gs : 1; /* 36 guarded storage registers validity */ + u64 : 5; /* 37-41 */ + u64 pr : 1; /* 42 tod programmable register validity */ + u64 fc : 1; /* 43 fp control register validity */ + u64 ap : 1; /* 44 ancillary report */ + u64 : 1; /* 45 */ + u64 ct : 1; /* 46 cpu timer validity */ + u64 cc : 1; /* 47 clock comparator validity */ + u64 : 16; /* 48-63 */ + }; +}; + +#define MCESA_ORIGIN_MASK (~0x3ffUL) +#define MCESA_LC_MASK (0xfUL) +#define MCESA_MIN_SIZE
(1024) +#define MCESA_MAX_SIZE (2048) + +struct mcesa { + u8 vector_save_area[1024]; + u8 guarded_storage_save_area[32]; +}; + +struct pt_regs; + +void nmi_alloc_mcesa_early(u64 *mcesad); +int nmi_alloc_mcesa(u64 *mcesad); +void nmi_free_mcesa(u64 *mcesad); + +void s390_handle_mcck(void); +void s390_do_machine_check(struct pt_regs *regs); + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_S390_NMI_H */ diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h new file mode 100644 index 000000000..82725cf78 --- /dev/null +++ b/arch/s390/include/asm/nospec-branch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_EXPOLINE_H +#define _ASM_S390_EXPOLINE_H + +#ifndef __ASSEMBLY__ + +#include + +extern int nospec_disable; + +void nospec_init_branches(void); +void nospec_auto_detect(void); +void nospec_revert(s32 *start, s32 *end); + +static inline bool nospec_uses_trampoline(void) +{ + return __is_defined(CC_USING_EXPOLINE) && !nospec_disable; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_EXPOLINE_H */ diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h new file mode 100644 index 000000000..7a946c42a --- /dev/null +++ b/arch/s390/include/asm/nospec-insn.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_NOSPEC_ASM_H +#define _ASM_S390_NOSPEC_ASM_H + +#include +#include + +#ifdef __ASSEMBLY__ + +#ifdef CC_USING_EXPOLINE + +/* + * The expoline macros are used to create thunks in the same format + * as gcc generates them. The 'comdat' section flag makes sure that + * the various thunks are merged into a single copy. + */ + .macro __THUNK_PROLOG_NAME name +#ifdef CONFIG_EXPOLINE_EXTERN + .pushsection .text,"ax",@progbits + __ALIGN +#else + .pushsection .text.\name,"axG",@progbits,\name,comdat +#endif + .globl \name + .hidden \name + .type \name,@function +\name: + CFI_STARTPROC + .endm + + .macro __THUNK_EPILOG_NAME name + CFI_ENDPROC +#ifdef CONFIG_EXPOLINE_EXTERN + .size \name, .-\name +#endif + .popsection + .endm + + .macro __THUNK_PROLOG_BR r1 + __THUNK_PROLOG_NAME __s390_indirect_jump_r\r1 + .endm + + .macro __THUNK_EPILOG_BR r1 + __THUNK_EPILOG_NAME __s390_indirect_jump_r\r1 + .endm + + .macro __THUNK_BR r1 + jg __s390_indirect_jump_r\r1 + .endm + + .macro __THUNK_BRASL r1,r2 + brasl \r1,__s390_indirect_jump_r\r2 + .endm + + .macro __DECODE_R expand,reg + .set .L__decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \reg,%r\r1 + \expand \r1 + .set .L__decode_fail,0 + .endif + .endr + .if .L__decode_fail == 1 + .error "__DECODE_R failed" + .endif + .endm + + .macro __DECODE_RR expand,rsave,rtarget + .set .L__decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rsave,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rtarget,%r\r2 + \expand \r1,\r2 + .set .L__decode_fail,0 + .endif + .endr + .endif + .endr + .if .L__decode_fail == 1 + .error "__DECODE_RR failed" + .endif + .endm + + .macro __THUNK_EX_BR reg + exrl 0,555f + j . +555: br \reg + .endm + +#ifdef CONFIG_EXPOLINE_EXTERN + .macro GEN_BR_THUNK reg + .endm + .macro GEN_BR_THUNK_EXTERN reg +#else + .macro GEN_BR_THUNK reg +#endif + __DECODE_R __THUNK_PROLOG_BR,\reg + __THUNK_EX_BR \reg + __DECODE_R __THUNK_EPILOG_BR,\reg + .endm + + .macro BR_EX reg +557: __DECODE_R __THUNK_BR,\reg + .pushsection .s390_indirect_branches,"a",@progbits + .long 557b-. 
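+	/* Record the branch location so the expoline can be reverted
+	 * to a plain br (see nospec_revert()) when the mitigation is
+	 * disabled at runtime. */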
+ .popsection + .endm + + .macro BASR_EX rsave,rtarget +559: __DECODE_RR __THUNK_BRASL,\rsave,\rtarget + .pushsection .s390_indirect_branches,"a",@progbits + .long 559b-. + .popsection + .endm + +#else + .macro GEN_BR_THUNK reg + .endm + + .macro BR_EX reg + br \reg + .endm + + .macro BASR_EX rsave,rtarget + basr \rsave,\rtarget + .endm +#endif /* CC_USING_EXPOLINE */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_NOSPEC_ASM_H */ diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h new file mode 100644 index 000000000..23cd5d1b7 --- /dev/null +++ b/arch/s390/include/asm/numa.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NUMA support for s390 + * + * Declare the NUMA core code structures and functions. + * + * Copyright IBM Corp. 2015 + */ + +#ifndef _ASM_S390_NUMA_H +#define _ASM_S390_NUMA_H + +#ifdef CONFIG_NUMA + +#include + +void numa_setup(void); + +#else + +static inline void numa_setup(void) { } + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_S390_NUMA_H */ diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h new file mode 100644 index 000000000..a4d2e103f --- /dev/null +++ b/arch/s390/include/asm/os_info.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OS info memory interface + * + * Copyright IBM Corp. 2012 + * Author(s): Michael Holzheu + */ +#ifndef _ASM_S390_OS_INFO_H +#define _ASM_S390_OS_INFO_H + +#include + +#define OS_INFO_VERSION_MAJOR 1 +#define OS_INFO_VERSION_MINOR 1 +#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */ + +#define OS_INFO_VMCOREINFO 0 +#define OS_INFO_REIPL_BLOCK 1 +#define OS_INFO_FLAGS_ENTRY 2 + +#define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0) + +struct os_info_entry { + u64 addr; + u64 size; + u32 csum; +} __packed; + +struct os_info { + u64 magic; + u32 csum; + u16 version_major; + u16 version_minor; + u64 crashkernel_addr; + u64 crashkernel_size; + struct os_info_entry entry[3]; + u8 reserved[4004]; +} __packed; + +void os_info_init(void); +void os_info_entry_add(int nr, void *ptr, u64 len); +void os_info_crashkernel_add(unsigned long base, unsigned long size); +u32 os_info_csum(struct os_info *os_info); + +#ifdef CONFIG_CRASH_DUMP +void *os_info_old_entry(int nr, unsigned long *size); +#else +static inline void *os_info_old_entry(int nr, unsigned long *size) +{ + return NULL; +} +#endif + +#endif /* _ASM_S390_OS_INFO_H */ diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h new file mode 100644 index 000000000..c33c4deb5 --- /dev/null +++ b/arch/s390/include/asm/page-states.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2017 + * Author(s): Claudio Imbrenda + */ + +#ifndef PAGE_STATES_H +#define PAGE_STATES_H + +#define ESSA_GET_STATE 0 +#define ESSA_SET_STABLE 1 +#define ESSA_SET_UNUSED 2 +#define ESSA_SET_VOLATILE 3 +#define ESSA_SET_POT_VOLATILE 4 +#define ESSA_SET_STABLE_RESIDENT 5 +#define ESSA_SET_STABLE_IF_RESIDENT 6 +#define ESSA_SET_STABLE_NODAT 7 + +#define ESSA_MAX ESSA_SET_STABLE_NODAT + +#endif diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h new file mode 100644 index 000000000..cfec07433 --- /dev/null +++ b/arch/s390/include/asm/page.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 
1999, 2000 + * Author(s): Hartmut Penner (hp@de.ibm.com) + */ + +#ifndef _S390_PAGE_H +#define _S390_PAGE_H + +#include +#include + +#define _PAGE_SHIFT 12 +#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT) +#define _PAGE_MASK (~(_PAGE_SIZE - 1)) + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT _PAGE_SHIFT +#define PAGE_SIZE _PAGE_SIZE +#define PAGE_MASK _PAGE_MASK +#define PAGE_DEFAULT_ACC _AC(0, UL) +/* storage-protection override */ +#define PAGE_SPO_ACC 9 +#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) + +#define HPAGE_SHIFT 20 +#define HPAGE_SIZE (1UL << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +#define HUGE_MAX_HSTATE 2 + +#define ARCH_HAS_SETCLEAR_HUGE_PTE +#define ARCH_HAS_HUGE_PTE_TYPE +#define ARCH_HAS_PREPARE_HUGEPAGE +#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH + +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + +#include +#ifndef __ASSEMBLY__ + +void __storage_key_init_range(unsigned long start, unsigned long end); + +static inline void storage_key_init_range(unsigned long start, unsigned long end) +{ + if (PAGE_DEFAULT_KEY != 0) + __storage_key_init_range(start, end); +} + +#define clear_page(page) memset((page), 0, PAGE_SIZE) + +/* + * copy_page uses the mvcl instruction with 0xb0 padding byte in order to + * bypass caches when copying a page. Especially when copying huge pages + * this keeps L1 and L2 data caches alive. + */ +static inline void copy_page(void *to, void *from) +{ + union register_pair dst, src; + + dst.even = (unsigned long) to; + dst.odd = 0x1000; + src.even = (unsigned long) from; + src.odd = 0xb0001000; + + asm volatile( + " mvcl %[dst],%[src]" + : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair) + : : "memory", "cc"); +} + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#define vma_alloc_zeroed_movable_folio(vma, vaddr) \ + vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false) + +/* + * These are used to make use of C type-checking.. 
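+ * For example, pte_val(__pte(x)) simply returns x again, while
+ * accidentally passing a pmd_t where a pte_t is expected fails to
+ * compile.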
+ */ + +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct { unsigned long pgste; } pgste_t; +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pud; } pud_t; +typedef struct { unsigned long p4d; } p4d_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef pte_t *pgtable_t; + +#define pgprot_val(x) ((x).pgprot) +#define pgste_val(x) ((x).pgste) + +static inline unsigned long pte_val(pte_t pte) +{ + return pte.pte; +} + +static inline unsigned long pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} + +static inline unsigned long pud_val(pud_t pud) +{ + return pud.pud; +} + +static inline unsigned long p4d_val(p4d_t p4d) +{ + return p4d.p4d; +} + +static inline unsigned long pgd_val(pgd_t pgd) +{ + return pgd.pgd; +} + +#define __pgste(x) ((pgste_t) { (x) } ) +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pud(x) ((pud_t) { (x) } ) +#define __p4d(x) ((p4d_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +static inline void page_set_storage_key(unsigned long addr, + unsigned char skey, int mapped) +{ + if (!mapped) + asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" + : : "d" (skey), "a" (addr)); + else + asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); +} + +static inline unsigned char page_get_storage_key(unsigned long addr) +{ + unsigned char skey; + + asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr)); + return skey; +} + +static inline int page_reset_referenced(unsigned long addr) +{ + int cc; + + asm volatile( + " rrbe 0,%1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) : "a" (addr) : "cc"); + return cc; +} + +/* Bits in the storage key */ +#define _PAGE_CHANGED 0x02 /* HW changed bit */ +#define _PAGE_REFERENCED 0x04 /* HW referenced bit */ +#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ +#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ + +struct page; +void arch_free_page(struct page *page, int order); +void arch_alloc_page(struct page *page, int order); +void arch_set_page_dat(struct page *page, int order); + +static inline int devmem_is_allowed(unsigned long pfn) +{ + return 0; +} + +#define HAVE_ARCH_FREE_PAGE +#define HAVE_ARCH_ALLOC_PAGE + +#if IS_ENABLED(CONFIG_PGSTE) +int arch_make_page_accessible(struct page *page); +#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE +#endif + +#define __PAGE_OFFSET 0x0UL +#define PAGE_OFFSET 0x0UL + +#define __pa(x) ((unsigned long)(x)) +#define __va(x) ((void *)(unsigned long)(x)) + +#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) +#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) + +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) +#define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) + +static inline void *pfn_to_virt(unsigned long pfn) +{ + return __va(pfn_to_phys(pfn)); +} + +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + return phys_to_pfn(__pa(kaddr)); +} + +#define pfn_to_kaddr(pfn) pfn_to_virt(pfn) + +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) +#define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) + +#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC + +#endif /* !__ASSEMBLY__ */ + +#include +#include + +#endif /* _S390_PAGE_H */ diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h new file mode 100644 index 000000000..7d1888e3d --- /dev/null +++ b/arch/s390/include/asm/pai.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0
*/ +/* + * Processor Activity Instrumentation support for cryptography counters + * + * Copyright IBM Corp. 2022 + * Author(s): Thomas Richter + */ +#ifndef _ASM_S390_PAI_H +#define _ASM_S390_PAI_H + +#include +#include +#include + +struct qpaci_info_block { + u64 header; + struct { + u64 : 8; + u64 num_cc : 8; /* # of supported crypto counters */ + u64 : 9; + u64 num_nnpa : 7; /* # of supported NNPA counters */ + u64 : 32; + }; +}; + +static inline int qpaci(struct qpaci_info_block *info) +{ + /* Size of info (in double words minus one) */ + size_t size = sizeof(*info) / sizeof(u64) - 1; + int cc; + + asm volatile( + " lgr 0,%[size]\n" + " .insn s,0xb28f0000,%[info]\n" + " lgr %[size],0\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size) + : + : "0", "cc", "memory"); + return cc ? (size + 1) * sizeof(u64) : 0; +} + +#define PAI_CRYPTO_BASE 0x1000 /* First event number */ +#define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */ +#define PAI_CRYPTO_KERNEL_OFFSET 2048 +#define PAI_NNPA_BASE 0x1800 /* First event number */ +#define PAI_NNPA_MAXCTR 128 /* Max # of event counters */ + +DECLARE_STATIC_KEY_FALSE(pai_key); + +static __always_inline void pai_kernel_enter(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); +} + +static __always_inline void pai_kernel_exit(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) + return; + if (!static_branch_unlikely(&pai_key)) + return; + if (!S390_lowcore.ccd) + return; + if (!user_mode(regs)) + return; + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); +} + +enum paievt_mode { + PAI_MODE_NONE, + PAI_MODE_SAMPLING, + PAI_MODE_COUNTING, +}; + +#endif diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h new file mode 100644 index 000000000..b248694e0 --- /dev/null +++ b/arch/s390/include/asm/pci.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_S390_PCI_H +#define __ASM_S390_PCI_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +#define pcibios_assign_all_busses() (0) + +void __iomem *pci_iomap(struct pci_dev *, int, unsigned long); +void pci_iounmap(struct pci_dev *, void __iomem *); +int pci_domain_nr(struct pci_bus *); +int pci_proc_domain(struct pci_bus *); + +#define ZPCI_BUS_NR 0 /* default bus number */ + +#define ZPCI_NR_DMA_SPACES 1 +#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS +#define ZPCI_DOMAIN_BITMAP_SIZE (1 << 16) + +#ifdef PCI +#if (ZPCI_NR_DEVICES > ZPCI_DOMAIN_BITMAP_SIZE) +# error ZPCI_NR_DEVICES can not be bigger than ZPCI_DOMAIN_BITMAP_SIZE +#endif +#endif /* PCI */ + +/* PCI Function Controls */ +#define ZPCI_FC_FN_ENABLED 0x80 +#define ZPCI_FC_ERROR 0x40 +#define ZPCI_FC_BLOCKED 0x20 +#define ZPCI_FC_DMA_ENABLED 0x10 + +#define ZPCI_FMB_DMA_COUNTER_VALID (1 << 23) + +struct zpci_fmb_fmt0 { + u64 dma_rbytes; + u64 dma_wbytes; +}; + +struct zpci_fmb_fmt1 { + u64 rx_bytes; + u64 rx_packets; + u64 tx_bytes; + u64 tx_packets; +}; + +struct zpci_fmb_fmt2 { + u64 consumed_work_units; + u64 max_work_units; +}; + +struct zpci_fmb_fmt3 { + u64 tx_bytes; +}; + +struct zpci_fmb { + u32 format : 8; + u32 fmt_ind : 24; + u32 samples; + u64 last_update; + /* common counters */ + u64 ld_ops; + 
u64 st_ops; + u64 stb_ops; + u64 rpcit_ops; + /* format specific counters */ + union { + struct zpci_fmb_fmt0 fmt0; + struct zpci_fmb_fmt1 fmt1; + struct zpci_fmb_fmt2 fmt2; + struct zpci_fmb_fmt3 fmt3; + }; +} __packed __aligned(128); + +enum zpci_state { + ZPCI_FN_STATE_STANDBY = 0, + ZPCI_FN_STATE_CONFIGURED = 1, + ZPCI_FN_STATE_RESERVED = 2, +}; + +struct zpci_bar_struct { + struct resource *res; /* bus resource */ + void __iomem *mio_wb; + void __iomem *mio_wt; + u32 val; /* bar start & 3 flag bits */ + u16 map_idx; /* index into bar mapping array */ + u8 size; /* order 2 exponent */ +}; + +struct s390_domain; +struct kvm_zdev; + +#define ZPCI_FUNCTIONS_PER_BUS 256 +struct zpci_bus { + struct kref kref; + struct pci_bus *bus; + struct zpci_dev *function[ZPCI_FUNCTIONS_PER_BUS]; + struct list_head resources; + struct list_head bus_next; + struct resource bus_resource; + int pchid; + int domain_nr; + bool multifunction; + enum pci_bus_speed max_bus_speed; +}; + +/* Private data per function */ +struct zpci_dev { + struct zpci_bus *zbus; + struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */ + struct list_head iommu_list; + struct kref kref; + struct rcu_head rcu; + struct hotplug_slot hotplug_slot; + + enum zpci_state state; + u32 fid; /* function ID, used by sclp */ + u32 fh; /* function handle, used by insn's */ + u32 gisa; /* GISA designation for passthrough */ + u16 vfn; /* virtual function number */ + u16 pchid; /* physical channel ID */ + u16 maxstbl; /* Maximum store block size */ + u8 pfgid; /* function group ID */ + u8 pft; /* pci function type */ + u8 port; + u8 dtsm; /* Supported DT mask */ + u8 rid_available : 1; + u8 has_hp_slot : 1; + u8 has_resources : 1; + u8 is_physfn : 1; + u8 util_str_avail : 1; + u8 irqs_registered : 1; + u8 reserved : 2; + unsigned int devfn; /* DEVFN part of the RID*/ + + struct mutex lock; + u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ + u32 uid; /* user defined id */ + u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ + + /* IRQ stuff */ + u64 msi_addr; /* MSI address */ + unsigned int max_msi; /* maximum number of MSI's */ + unsigned int msi_first_bit; + unsigned int msi_nr_irqs; + struct airq_iv *aibv; /* adapter interrupt bit vector */ + unsigned long aisb; /* number of the summary bit */ + + /* DMA stuff */ + unsigned long *dma_table; + int tlb_refresh; + + spinlock_t iommu_bitmap_lock; + unsigned long *iommu_bitmap; + unsigned long *lazy_bitmap; + unsigned long iommu_size; + unsigned long iommu_pages; + unsigned int next_bit; + + struct iommu_device iommu_dev; /* IOMMU core handle */ + + char res_name[16]; + bool mio_capable; + struct zpci_bar_struct bars[PCI_STD_NUM_BARS]; + + u64 start_dma; /* Start of available DMA addresses */ + u64 end_dma; /* End of available DMA addresses */ + u64 dma_mask; /* DMA address space mask */ + + /* Function measurement block */ + struct zpci_fmb *fmb; + u16 fmb_update; /* update interval */ + u16 fmb_length; + /* software counters */ + atomic64_t allocated_pages; + atomic64_t mapped_pages; + atomic64_t unmapped_pages; + + u8 version; + enum pci_bus_speed max_bus_speed; + + struct dentry *debugfs_dev; + + /* IOMMU and passthrough */ + struct s390_domain *s390_domain; /* s390 IOMMU domain data */ + struct kvm_zdev *kzdev; + struct mutex kzdev_lock; +}; + +static inline bool zdev_enabled(struct zpci_dev *zdev) +{ + return (zdev->fh & (1UL << 31)) ? 
true : false; +} + +extern const struct attribute_group *zpci_attr_groups[]; +extern unsigned int s390_pci_force_floating __initdata; +extern unsigned int s390_pci_no_rid; + +extern union zpci_sic_iib *zpci_aipb; +extern struct airq_iv *zpci_aif_sbv; + +/* ----------------------------------------------------------------------------- + Prototypes +----------------------------------------------------------------------------- */ +/* Base stuff */ +struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state); +int zpci_enable_device(struct zpci_dev *); +int zpci_disable_device(struct zpci_dev *); +int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh); +int zpci_deconfigure_device(struct zpci_dev *zdev); +void zpci_device_reserved(struct zpci_dev *zdev); +bool zpci_is_device_configured(struct zpci_dev *zdev); + +int zpci_hot_reset_device(struct zpci_dev *zdev); +int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *); +int zpci_unregister_ioat(struct zpci_dev *, u8); +void zpci_remove_reserved_devices(void); +void zpci_update_fh(struct zpci_dev *zdev, u32 fh); + +/* CLP */ +int clp_setup_writeback_mio(void); +int clp_scan_pci_devices(void); +int clp_query_pci_fn(struct zpci_dev *zdev); +int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as); +int clp_disable_fh(struct zpci_dev *zdev, u32 *fh); +int clp_get_state(u32 fid, enum zpci_state *state); +int clp_refresh_fh(u32 fid, u32 *fh); + +/* UID */ +void update_uid_checking(bool new); + +/* IOMMU Interface */ +int zpci_init_iommu(struct zpci_dev *zdev); +void zpci_destroy_iommu(struct zpci_dev *zdev); + +#ifdef CONFIG_PCI +static inline bool zpci_use_mio(struct zpci_dev *zdev) +{ + return static_branch_likely(&have_mio) && zdev->mio_capable; +} + +/* Error handling and recovery */ +void zpci_event_error(void *); +void zpci_event_availability(void *); +bool zpci_is_enabled(void); +#else /* CONFIG_PCI */ +static inline void zpci_event_error(void *e) {} +static inline void zpci_event_availability(void *e) {} +#endif /* CONFIG_PCI */ + +#ifdef CONFIG_HOTPLUG_PCI_S390 +int zpci_init_slot(struct zpci_dev *); +void zpci_exit_slot(struct zpci_dev *); +#else /* CONFIG_HOTPLUG_PCI_S390 */ +static inline int zpci_init_slot(struct zpci_dev *zdev) +{ + return 0; +} +static inline void zpci_exit_slot(struct zpci_dev *zdev) {} +#endif /* CONFIG_HOTPLUG_PCI_S390 */ + +/* Helpers */ +static inline struct zpci_dev *to_zpci(struct pci_dev *pdev) +{ + struct zpci_bus *zbus = pdev->sysdata; + + return zbus->function[pdev->devfn]; +} + +static inline struct zpci_dev *to_zpci_dev(struct device *dev) +{ + return to_zpci(to_pci_dev(dev)); +} + +struct zpci_dev *get_zdev_by_fid(u32); + +/* DMA */ +int zpci_dma_init(void); +void zpci_dma_exit(void); +int zpci_dma_init_device(struct zpci_dev *zdev); +int zpci_dma_exit_device(struct zpci_dev *zdev); + +/* IRQ */ +int __init zpci_irq_init(void); +void __init zpci_irq_exit(void); + +/* FMB */ +int zpci_fmb_enable_device(struct zpci_dev *); +int zpci_fmb_disable_device(struct zpci_dev *); + +/* Debug */ +int zpci_debug_init(void); +void zpci_debug_exit(void); +void zpci_debug_init_device(struct zpci_dev *, const char *); +void zpci_debug_exit_device(struct zpci_dev *); + +/* Error handling */ +int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *); +int zpci_clear_error_state(struct zpci_dev *zdev); +int zpci_reset_load_store_blocked(struct zpci_dev *zdev); + +#ifdef CONFIG_NUMA + +/* Returns the node based on PCI bus */ +static inline int __pcibus_to_node(const 
struct pci_bus *bus) +{ + return NUMA_NO_NODE; +} + +static inline const struct cpumask * +cpumask_of_pcibus(const struct pci_bus *bus) +{ + return cpu_online_mask; +} + +#endif /* CONFIG_NUMA */ + +#endif diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h new file mode 100644 index 000000000..d6189ed14 --- /dev/null +++ b/arch/s390/include/asm/pci_clp.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_PCI_CLP_H +#define _ASM_S390_PCI_CLP_H + +#include + +/* + * Call Logical Processor - Command Codes + */ +#define CLP_SLPC 0x0001 +#define CLP_LIST_PCI 0x0002 +#define CLP_QUERY_PCI_FN 0x0003 +#define CLP_QUERY_PCI_FNGRP 0x0004 +#define CLP_SET_PCI_FN 0x0005 + +/* PCI function handle list entry */ +struct clp_fh_list_entry { + u16 device_id; + u16 vendor_id; + u32 config_state : 1; + u32 : 31; + u32 fid; /* PCI function id */ + u32 fh; /* PCI function handle */ +} __packed; + +#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */ +#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */ +#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */ +#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */ +#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */ +#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */ +#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */ +#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */ +#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */ +#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */ + +/* request or response block header length */ +#define LIST_PCI_HDR_LEN 32 + +/* Number of function handles fitting in response block */ +#define CLP_FH_LIST_NR_ENTRIES \ + ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \ + / sizeof(struct clp_fh_list_entry)) + +#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */ +#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */ +#define CLP_SET_ENABLE_MIO 2 +#define CLP_SET_DISABLE_MIO 3 + +#define CLP_UTIL_STR_LEN 64 +#define CLP_PFIP_NR_SEGMENTS 4 + +extern bool zpci_unique_uid; + +struct clp_rsp_slpc_pci { + struct clp_rsp_hdr hdr; + u32 reserved2[4]; + u32 lpif[8]; + u32 reserved3[4]; + u32 vwb : 1; + u32 : 1; + u32 mio_wb : 6; + u32 : 24; + u32 reserved5[3]; + u32 lpic[8]; +} __packed; + +/* List PCI functions request */ +struct clp_req_list_pci { + struct clp_req_hdr hdr; + u64 resume_token; + u64 reserved2; +} __packed; + +/* List PCI functions response */ +struct clp_rsp_list_pci { + struct clp_rsp_hdr hdr; + u64 resume_token; + u32 reserved2; + u16 max_fn; + u8 : 7; + u8 uid_checking : 1; + u8 entry_size; + struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES]; +} __packed; + +struct mio_info { + u32 valid : 6; + u32 : 26; + u32 : 32; + struct { + u64 wb; + u64 wt; + } addr[PCI_STD_NUM_BARS]; + u32 reserved[6]; +} __packed; + +/* Query PCI function request */ +struct clp_req_query_pci { + struct clp_req_hdr hdr; + u32 fh; /* function handle */ + u32 reserved2; + u64 reserved3; +} __packed; + +/* Query PCI function response */ +struct clp_rsp_query_pci { + struct clp_rsp_hdr hdr; + u16 vfn; /* virtual fn number */ + u16 : 3; + u16 rid_avail : 1; + u16 is_physfn : 1; + u16 reserved1 : 1; + u16 mio_addr_avail : 1; + u16 util_str_avail : 1; /* utility string available? 
*/ + u16 pfgid : 8; /* pci function group id */ + u32 fid; /* pci function id */ + u8 bar_size[PCI_STD_NUM_BARS]; + u16 pchid; + __le32 bar[PCI_STD_NUM_BARS]; + u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ + u16 : 12; + u16 port : 4; + u8 fmb_len; + u8 pft; /* pci function type */ + u64 sdma; /* start dma as */ + u64 edma; /* end dma as */ +#define ZPCI_RID_MASK_DEVFN 0x00ff + u16 rid; /* BUS/DEVFN PCI address */ + u16 reserved0; + u32 reserved[10]; + u32 uid; /* user defined id */ + u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ + u32 reserved2[16]; + struct mio_info mio; +} __packed; + +/* Query PCI function group request */ +struct clp_req_query_pci_grp { + struct clp_req_hdr hdr; + u32 reserved2 : 24; + u32 pfgid : 8; /* function group id */ + u32 reserved3; + u64 reserved4; +} __packed; + +/* Query PCI function group response */ +struct clp_rsp_query_pci_grp { + struct clp_rsp_hdr hdr; + u16 : 4; + u16 noi : 12; /* number of interrupts */ + u8 version; + u8 : 6; + u8 frame : 1; + u8 refresh : 1; /* TLB refresh mode */ + u16 : 3; + u16 maxstbl : 13; /* Maximum store block size */ + u16 mui; + u8 dtsm; /* Supported DT mask */ + u8 reserved3; + u16 maxfaal; + u16 : 4; + u16 dnoi : 12; + u16 maxcpu; + u64 dasm; /* dma address space mask */ + u64 msia; /* MSI address */ + u64 reserved4; + u64 reserved5; +} __packed; + +/* Set PCI function request */ +struct clp_req_set_pci { + struct clp_req_hdr hdr; + u32 fh; /* function handle */ + u16 reserved2; + u8 oc; /* operation controls */ + u8 ndas; /* number of dma spaces */ + u32 reserved3; + u32 gisa; /* GISA designation */ +} __packed; + +/* Set PCI function response */ +struct clp_rsp_set_pci { + struct clp_rsp_hdr hdr; + u32 fh; /* function handle */ + u32 reserved1; + u64 reserved2; + struct mio_info mio; +} __packed; + +/* Combined request/response block structures used by clp insn */ +struct clp_req_rsp_slpc_pci { + struct clp_req_slpc request; + struct clp_rsp_slpc_pci response; +} __packed; + +struct clp_req_rsp_list_pci { + struct clp_req_list_pci request; + struct clp_rsp_list_pci response; +} __packed; + +struct clp_req_rsp_set_pci { + struct clp_req_set_pci request; + struct clp_rsp_set_pci response; +} __packed; + +struct clp_req_rsp_query_pci { + struct clp_req_query_pci request; + struct clp_rsp_query_pci response; +} __packed; + +struct clp_req_rsp_query_pci_grp { + struct clp_req_query_pci_grp request; + struct clp_rsp_query_pci_grp response; +} __packed; + +#endif diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h new file mode 100644 index 000000000..3bb4e7e33 --- /dev/null +++ b/arch/s390/include/asm/pci_debug.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S390_ASM_PCI_DEBUG_H +#define _S390_ASM_PCI_DEBUG_H + +#include + +extern debug_info_t *pci_debug_msg_id; +extern debug_info_t *pci_debug_err_id; + +#define zpci_dbg(imp, fmt, args...) \ + debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) + +#define zpci_err(text...) 
\ + do { \ + char debug_buffer[16]; \ + snprintf(debug_buffer, 16, text); \ + debug_text_event(pci_debug_err_id, 0, debug_buffer); \ + } while (0) + +static inline void zpci_err_hex_level(int level, void *addr, int len) +{ + debug_event(pci_debug_err_id, level, addr, len); +} + +static inline void zpci_err_hex(void *addr, int len) +{ + zpci_err_hex_level(0, addr, len); +} + +#endif diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h new file mode 100644 index 000000000..7119c04c5 --- /dev/null +++ b/arch/s390/include/asm/pci_dma.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_PCI_DMA_H +#define _ASM_S390_PCI_DMA_H + +/* I/O Translation Anchor (IOTA) */ +enum zpci_ioat_dtype { + ZPCI_IOTA_STO = 0, + ZPCI_IOTA_RTTO = 1, + ZPCI_IOTA_RSTO = 2, + ZPCI_IOTA_RFTO = 3, + ZPCI_IOTA_PFAA = 4, + ZPCI_IOTA_IOPFAA = 5, + ZPCI_IOTA_IOPTO = 7 +}; + +#define ZPCI_IOTA_IOT_ENABLED 0x800UL +#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2) +#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2) +#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2) +#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2) +#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2) +#define ZPCI_IOTA_FS_4K 0 +#define ZPCI_IOTA_FS_1M 1 +#define ZPCI_IOTA_FS_2G 2 +#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5) + +#define ZPCI_TABLE_SIZE_RT (1UL << 42) + +#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST) +#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT) +#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS) +#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF) +#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G) + +/* I/O Region and segment tables */ +#define ZPCI_INDEX_MASK 0x7ffUL + +#define ZPCI_TABLE_TYPE_MASK 0xc +#define ZPCI_TABLE_TYPE_RFX 0xc +#define ZPCI_TABLE_TYPE_RSX 0x8 +#define ZPCI_TABLE_TYPE_RTX 0x4 +#define ZPCI_TABLE_TYPE_SX 0x0 + +#define ZPCI_TABLE_LEN_RFX 0x3 +#define ZPCI_TABLE_LEN_RSX 0x3 +#define ZPCI_TABLE_LEN_RTX 0x3 + +#define ZPCI_TABLE_OFFSET_MASK 0xc0 +#define ZPCI_TABLE_SIZE 0x4000 +#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE +#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long)) +#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE) + +#define ZPCI_TABLE_BITS 11 +#define ZPCI_PT_BITS 8 +#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT) +#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS) + +#define ZPCI_RTE_FLAG_MASK 0x3fffUL +#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK) +#define ZPCI_STE_FLAG_MASK 0x7ffUL +#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK) + +/* I/O Page tables */ +#define ZPCI_PTE_VALID_MASK 0x400 +#define ZPCI_PTE_INVALID 0x400 +#define ZPCI_PTE_VALID 0x000 +#define ZPCI_PT_SIZE 0x800 +#define ZPCI_PT_ALIGN ZPCI_PT_SIZE +#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE) +#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1) + +#define ZPCI_PTE_FLAG_MASK 0xfffUL +#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK) + +/* Shared bits */ +#define ZPCI_TABLE_VALID 0x00 +#define ZPCI_TABLE_INVALID 0x20 +#define ZPCI_TABLE_PROTECTED 0x200 +#define ZPCI_TABLE_UNPROTECTED 0x000 + +#define ZPCI_TABLE_VALID_MASK 0x20 +#define ZPCI_TABLE_PROT_MASK 0x200 + +static inline unsigned int calc_rtx(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK; +} + +static inline unsigned int calc_sx(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK; +} + 
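+/*
+ * Worked example (editor's illustration, not part of the original patch):
+ * with PAGE_SHIFT = 12, ZPCI_ST_SHIFT = 20 and ZPCI_RT_SHIFT = 31, the
+ * index helpers calc_rtx()/calc_sx()/calc_px() split a dma address into
+ * 11 + 11 + 8 table-index bits above the 4K page offset. For
+ * dma_addr = 0x12345678000:
+ *	calc_rtx(dma_addr) = (dma_addr >> 31) & 0x7ff = 0x246
+ *	calc_sx(dma_addr)  = (dma_addr >> 20) & 0x7ff = 0x456
+ *	calc_px(dma_addr)  = (dma_addr >> 12) & 0xff  = 0x78
+ * which together cover the ZPCI_TABLE_SIZE_RT (1UL << 42) address range.
+ */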
+static inline unsigned int calc_px(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK; +} + +static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa) +{ + *entry &= ZPCI_PTE_FLAG_MASK; + *entry |= (pfaa & ZPCI_PTE_ADDR_MASK); +} + +static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto) +{ + *entry &= ZPCI_RTE_FLAG_MASK; + *entry |= (sto & ZPCI_RTE_ADDR_MASK); + *entry |= ZPCI_TABLE_TYPE_RTX; +} + +static inline void set_st_pto(unsigned long *entry, phys_addr_t pto) +{ + *entry &= ZPCI_STE_FLAG_MASK; + *entry |= (pto & ZPCI_STE_ADDR_MASK); + *entry |= ZPCI_TABLE_TYPE_SX; +} + +static inline void validate_rt_entry(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_VALID_MASK; + *entry &= ~ZPCI_TABLE_OFFSET_MASK; + *entry |= ZPCI_TABLE_VALID; + *entry |= ZPCI_TABLE_LEN_RTX; +} + +static inline void validate_st_entry(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_VALID_MASK; + *entry |= ZPCI_TABLE_VALID; +} + +static inline void invalidate_pt_entry(unsigned long *entry) +{ + WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID); + *entry &= ~ZPCI_PTE_VALID_MASK; + *entry |= ZPCI_PTE_INVALID; +} + +static inline void validate_pt_entry(unsigned long *entry) +{ + WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID); + *entry &= ~ZPCI_PTE_VALID_MASK; + *entry |= ZPCI_PTE_VALID; +} + +static inline void entry_set_protected(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_PROT_MASK; + *entry |= ZPCI_TABLE_PROTECTED; +} + +static inline void entry_clr_protected(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_PROT_MASK; + *entry |= ZPCI_TABLE_UNPROTECTED; +} + +static inline int reg_entry_isvalid(unsigned long entry) +{ + return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID; +} + +static inline int pt_entry_isvalid(unsigned long entry) +{ + return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; +} + +static inline unsigned long *get_rt_sto(unsigned long entry) +{ + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) + return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK); + else + return NULL; + +} + +static inline unsigned long *get_st_pto(unsigned long entry) +{ + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) + return phys_to_virt(entry & ZPCI_STE_ADDR_MASK); + else + return NULL; +} + +/* Prototypes */ +void dma_free_seg_table(unsigned long); +unsigned long *dma_alloc_cpu_table(gfp_t gfp); +void dma_cleanup_tables(unsigned long *); +unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, + gfp_t gfp); +void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags); + +extern const struct dma_map_ops s390_pci_dma_ops; + + +#endif diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h new file mode 100644 index 000000000..e5f57cfe1 --- /dev/null +++ b/arch/s390/include/asm/pci_insn.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_PCI_INSN_H +#define _ASM_S390_PCI_INSN_H + +#include + +/* Load/Store status codes */ +#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 +#define ZPCI_PCI_ST_FUNC_IN_ERR 8 +#define ZPCI_PCI_ST_BLOCKED 12 +#define ZPCI_PCI_ST_INSUF_RES 16 +#define ZPCI_PCI_ST_INVAL_AS 20 +#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24 +#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28 +#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36 +#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40 +#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44 + +/* Load/Store return codes */ +#define ZPCI_PCI_LS_OK 0 +#define ZPCI_PCI_LS_ERR 1 +#define ZPCI_PCI_LS_BUSY 2 
+#define ZPCI_PCI_LS_INVAL_HANDLE	3
+
+/* Load/Store address space identifiers */
+#define ZPCI_PCIAS_MEMIO_0		0
+#define ZPCI_PCIAS_MEMIO_1		1
+#define ZPCI_PCIAS_MEMIO_2		2
+#define ZPCI_PCIAS_MEMIO_3		3
+#define ZPCI_PCIAS_MEMIO_4		4
+#define ZPCI_PCIAS_MEMIO_5		5
+#define ZPCI_PCIAS_CFGSPC		15
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT	2
+#define ZPCI_MOD_FC_DEREG_INT	3
+#define ZPCI_MOD_FC_REG_IOAT	4
+#define ZPCI_MOD_FC_DEREG_IOAT	5
+#define ZPCI_MOD_FC_REREG_IOAT	6
+#define ZPCI_MOD_FC_RESET_ERROR	7
+#define ZPCI_MOD_FC_RESET_BLOCK	9
+#define ZPCI_MOD_FC_SET_MEASURE	10
+#define ZPCI_MOD_FC_REG_INT_D	16
+#define ZPCI_MOD_FC_DEREG_INT_D	17
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED	0x80
+#define ZPCI_FIB_FC_ERROR	0x40
+#define ZPCI_FIB_FC_LS_BLOCKED	0x20
+#define ZPCI_FIB_FC_DMAAS_REG	0x10
+
+struct zpci_fib_fmt0 {
+	u32		: 1;
+	u32 isc		: 3;	/* Interrupt subclass */
+	u32 noi		: 12;	/* Number of interrupts */
+	u32		: 2;
+	u32 aibvo	: 6;	/* Adapter interrupt bit vector offset */
+	u32 sum		: 1;	/* Adapter int summary bit enabled */
+	u32		: 1;
+	u32 aisbo	: 6;	/* Adapter int summary bit offset */
+	u32		: 32;
+	u64 aibv;		/* Adapter int bit vector address */
+	u64 aisb;		/* Adapter int summary bit address */
+};
+
+struct zpci_fib_fmt1 {
+	u32		: 4;
+	u32 noi		: 12;
+	u32		: 16;
+	u32 dibvo	: 16;
+	u32		: 16;
+	u64		: 64;
+	u64		: 64;
+};
+
+/* Function Information Block */
+struct zpci_fib {
+	u32 fmt		: 8;	/* format */
+	u32		: 24;
+	u32		: 32;
+	u8 fc;			/* function controls */
+	u64		: 56;
+	u64 pba;		/* PCI base address */
+	u64 pal;		/* PCI address limit */
+	u64 iota;		/* I/O Translation Anchor */
+	union {
+		struct zpci_fib_fmt0 fmt0;
+		struct zpci_fib_fmt1 fmt1;
+	};
+	u64 fmb_addr;		/* Function measurement block address and key */
+	u32		: 32;
+	u32 gd;
+} __packed __aligned(8);
+
+/* Set Interruption Controls Operation Controls */
+#define	SIC_IRQ_MODE_ALL		0
+#define	SIC_IRQ_MODE_SINGLE		1
+#define	SIC_SET_AENI_CONTROLS		2
+#define	SIC_IRQ_MODE_DIRECT		4
+#define	SIC_IRQ_MODE_D_ALL		16
+#define	SIC_IRQ_MODE_D_SINGLE		17
+#define	SIC_IRQ_MODE_SET_CPU		18
+
+/* directed interruption information block */
+struct zpci_diib {
+	u32		: 1;
+	u32 isc		: 3;
+	u32		: 28;
+	u16		: 16;
+	u16 nr_cpus;
+	u64 disb_addr;
+	u64		: 64;
+	u64		: 64;
+} __packed __aligned(8);
+
+/* cpu directed interruption information block */
+struct zpci_cdiib {
+	u64		: 64;
+	u64 dibv_addr;
+	u64		: 64;
+	u64		: 64;
+	u64		: 64;
+} __packed __aligned(8);
+
+/* adapter interruption parameters block */
+struct zpci_aipb {
+	u64 faisb;
+	u64 gait;
+	u16		: 13;
+	u16 afi		: 3;
+	u32		: 32;
+	u16 faal;
+} __packed __aligned(8);
+
+union zpci_sic_iib {
+	struct zpci_diib diib;
+	struct zpci_cdiib cdiib;
+	struct zpci_aipb aipb;
+};
+
+DECLARE_STATIC_KEY_FALSE(have_mio);
+
+u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int __zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len);
+int __zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
+int __zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_barrier(void);
+int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
+
+#endif
diff --git a/arch/s390/include/asm/pci_io.h 
b/arch/s390/include/asm/pci_io.h new file mode 100644 index 000000000..2686bee80 --- /dev/null +++ b/arch/s390/include/asm/pci_io.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_PCI_IO_H +#define _ASM_S390_PCI_IO_H + +#ifdef CONFIG_PCI + +#include +#include +#include + +/* I/O size constraints */ +#define ZPCI_MAX_READ_SIZE 8 +#define ZPCI_MAX_WRITE_SIZE 128 +#define ZPCI_BOUNDARY_SIZE (1 << 12) +#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1) + +/* I/O Map */ +#define ZPCI_IOMAP_SHIFT 48 +#define ZPCI_IOMAP_ADDR_SHIFT 62 +#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT) +#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1) +#define ZPCI_IOMAP_MAX_ENTRIES \ + (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT)) +#define ZPCI_IOMAP_ADDR_IDX_MASK \ + ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK) + +struct zpci_iomap_entry { + u32 fh; + u8 bar; + u16 count; +}; + +extern struct zpci_iomap_entry *zpci_iomap_start; + +#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT)) +#define ZPCI_IDX(addr) \ + (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT) +#define ZPCI_OFFSET(addr) \ + ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK) + +#define ZPCI_CREATE_REQ(handle, space, len) \ + ((u64) handle << 32 | space << 16 | len) + +#define zpci_read(LENGTH, RETTYPE) \ +static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ +{ \ + u64 data; \ + int rc; \ + \ + rc = zpci_load(&data, addr, LENGTH); \ + if (rc) \ + data = -1ULL; \ + return (RETTYPE) data; \ +} + +#define zpci_write(LENGTH, VALTYPE) \ +static inline void zpci_write_##VALTYPE(VALTYPE val, \ + const volatile void __iomem *addr) \ +{ \ + u64 data = (VALTYPE) val; \ + \ + zpci_store(addr, data, LENGTH); \ +} + +zpci_read(8, u64) +zpci_read(4, u32) +zpci_read(2, u16) +zpci_read(1, u8) +zpci_write(8, u64) +zpci_write(4, u32) +zpci_write(2, u16) +zpci_write(1, u8) + +static inline int zpci_write_single(volatile void __iomem *dst, const void *src, + unsigned long len) +{ + u64 val; + + switch (len) { + case 1: + val = (u64) *((u8 *) src); + break; + case 2: + val = (u64) *((u16 *) src); + break; + case 4: + val = (u64) *((u32 *) src); + break; + case 8: + val = (u64) *((u64 *) src); + break; + default: + val = 0; /* let FW report error */ + break; + } + return zpci_store(dst, val, len); +} + +static inline int zpci_read_single(void *dst, const volatile void __iomem *src, + unsigned long len) +{ + u64 data; + int cc; + + cc = zpci_load(&data, src, len); + if (cc) + goto out; + + switch (len) { + case 1: + *((u8 *) dst) = (u8) data; + break; + case 2: + *((u16 *) dst) = (u16) data; + break; + case 4: + *((u32 *) dst) = (u32) data; + break; + case 8: + *((u64 *) dst) = (u64) data; + break; + } +out: + return cc; +} + +int zpci_write_block(volatile void __iomem *dst, const void *src, + unsigned long len); + +static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max) +{ + int offset = dst & ZPCI_BOUNDARY_MASK; + int size; + + size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max); + if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8)) + return size; + + if (size >= 8) + return 8; + return rounddown_pow_of_two(size); +} + +static inline int zpci_memcpy_fromio(void *dst, + const volatile void __iomem *src, + unsigned long n) +{ + int size, rc = 0; + + while (n > 0) { + size = zpci_get_max_io_size((u64 __force) src, + (u64) dst, n, + ZPCI_MAX_READ_SIZE); + rc = zpci_read_single(dst, src, size); + if 
(rc)
+			break;
+		src += size;
+		dst += size;
+		n -= size;
+	}
+	return rc;
+}
+
+static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+				   const void *src, unsigned long n)
+{
+	int size, rc = 0;
+
+	if (!src)
+		return -EINVAL;
+
+	while (n > 0) {
+		size = zpci_get_max_io_size((u64 __force) dst,
+					    (u64) src, n,
+					    ZPCI_MAX_WRITE_SIZE);
+		if (size > 8) /* main path */
+			rc = zpci_write_block(dst, src, size);
+		else
+			rc = zpci_write_single(dst, src, size);
+		if (rc)
+			break;
+		src += size;
+		dst += size;
+		n -= size;
+	}
+	return rc;
+}
+
+static inline int zpci_memset_io(volatile void __iomem *dst,
+				 unsigned char val, size_t count)
+{
+	u8 *src = kmalloc(count, GFP_KERNEL);
+	int rc;
+
+	if (src == NULL)
+		return -ENOMEM;
+	memset(src, val, count);
+
+	rc = zpci_memcpy_toio(dst, src, count);
+	kfree(src);
+	return rc;
+}
+
+#endif /* CONFIG_PCI */
+
+#endif /* _ASM_S390_PCI_IO_H */
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
new file mode 100644
index 000000000..264095dd8
--- /dev/null
+++ b/arch/s390/include/asm/percpu.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARCH_S390_PERCPU__
+#define __ARCH_S390_PERCPU__
+
+#include <linux/preempt.h>
+#include <asm/cmpxchg.h>
+
+/*
+ * s390 uses its own implementation for per cpu data, the offset of
+ * the cpu local data area is cached in the cpu's lowcore memory.
+ */
+#define __my_cpu_offset S390_lowcore.percpu_offset
+
+/*
+ * For 64 bit module code, the module may be more than 4G above the
+ * per cpu area, use weak definitions to force the compiler to
+ * generate external references.
+ */
+#if defined(MODULE)
+#define ARCH_NEEDS_WEAK_PER_CPU
+#endif
+
+/*
+ * We use a compare-and-swap loop since that uses fewer cpu cycles than
+ * disabling and enabling interrupts like the generic variant would do.
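+ *
+ * As an editor's illustration (not part of the original patch), the
+ * arch_this_cpu_to_op_simple() macro below turns e.g. this_cpu_add_1(x, 1)
+ * into a loop of roughly this shape:
+ *
+ *	prev = READ_ONCE(*ptr);
+ *	do {
+ *		old = prev;
+ *		new = old + 1;
+ *		prev = cmpxchg(ptr, old, new);
+ *	} while (prev != old);
+ *
+ * i.e. the add is retried until no other update slipped in between the
+ * read and the compare-and-swap.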
+ */ +#define arch_this_cpu_to_op_simple(pcp, val, op) \ +({ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ old__, new__, prev__; \ + pcp_op_T__ *ptr__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + prev__ = READ_ONCE(*ptr__); \ + do { \ + old__ = prev__; \ + new__ = old__ op (val); \ + prev__ = cmpxchg(ptr__, old__, new__); \ + } while (prev__ != old__); \ + preempt_enable_notrace(); \ + new__; \ +}) + +#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) +#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) + +#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES + +#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) +#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) + +#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \ +{ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + if (__builtin_constant_p(val__) && \ + ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ + asm volatile( \ + op2 " %[ptr__],%[val__]\n" \ + : [ptr__] "+Q" (*ptr__) \ + : [val__] "i" ((szcast)val__) \ + : "cc"); \ + } else { \ + asm volatile( \ + op1 " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + } \ + preempt_enable_notrace(); \ +} + +#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) +#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long) + +#define arch_this_cpu_add_return(pcp, val, op) \ +({ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + preempt_enable_notrace(); \ + old__ + val__; \ +}) + +#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa") +#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag") + +#define arch_this_cpu_to_op(pcp, val, op) \ +{ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + preempt_enable_notrace(); \ +} + +#define this_cpu_and_4(pcp, val) 
arch_this_cpu_to_op(pcp, val, "lan") +#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang") +#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao") +#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog") + +#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define arch_this_cpu_cmpxchg(pcp, oval, nval) \ +({ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ ret__; \ + pcp_op_T__ *ptr__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + ret__ = cmpxchg(ptr__, oval, nval); \ + preempt_enable_notrace(); \ + ret__; \ +}) + +#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) +#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) +#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) +#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) + +#define this_cpu_cmpxchg64(pcp, o, n) this_cpu_cmpxchg_8(pcp, o, n) + +#define this_cpu_cmpxchg128(pcp, oval, nval) \ +({ \ + typedef typeof(pcp) pcp_op_T__; \ + u128 old__, new__, ret__; \ + pcp_op_T__ *ptr__; \ + old__ = oval; \ + new__ = nval; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + ret__ = cmpxchg128((void *)ptr__, old__, new__); \ + preempt_enable_notrace(); \ + ret__; \ +}) + +#define arch_this_cpu_xchg(pcp, nval) \ +({ \ + typeof(pcp) *ptr__; \ + typeof(pcp) ret__; \ + preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + ret__ = xchg(ptr__, nval); \ + preempt_enable_notrace(); \ + ret__; \ +}) + +#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval) +#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval) +#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval) +#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval) + +#include + +#endif /* __ARCH_S390_PERCPU__ */ diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h new file mode 100644 index 000000000..9917e2717 --- /dev/null +++ b/arch/s390/include/asm/perf_event.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Performance event support - s390 specific definitions. + * + * Copyright IBM Corp. 
2009, 2017 + * Author(s): Martin Schwidefsky + * Hendrik Brueckner + */ + +#ifndef _ASM_S390_PERF_EVENT_H +#define _ASM_S390_PERF_EVENT_H + +#include +#include +#include + +/* Per-CPU flags for PMU states */ +#define PMU_F_RESERVED 0x1000 +#define PMU_F_ENABLED 0x2000 +#define PMU_F_IN_USE 0x4000 +#define PMU_F_ERR_IBE 0x0100 +#define PMU_F_ERR_LSDA 0x0200 +#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA) + +/* Perf definitions for PMU event attributes in sysfs */ +extern __init const struct attribute_group **cpumf_cf_event_group(void); +extern ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); +#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name +#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr) + +#define CPUMF_EVENT_ATTR(cat, name, id) \ + PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show) +#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name) + + +/* Perf callbacks */ +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs + +/* Perf pt_regs extension for sample-data-entry indicators */ +struct perf_sf_sde_regs { + unsigned char in_guest:1; /* guest sample */ + unsigned long reserved:63; /* reserved */ +}; + +/* Perf PMU definitions for the counter facility */ +#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ + +/* Perf PMU definitions for the sampling facility */ +#define PERF_CPUM_SF_MAX_CTR 2 +#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ +#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ +#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ +#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ +#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ + PERF_CPUM_SF_DIAG_MODE) +#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ + +#define REG_NONE 0 +#define REG_OVERFLOW 1 +#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) +#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) +#define TEAR_REG(hwc) ((hwc)->last_tag) +#define SAMPL_RATE(hwc) ((hwc)->event_base) +#define SAMPL_FLAGS(hwc) ((hwc)->config_base) +#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) +#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) + +#define perf_arch_fetch_caller_regs(regs, __ip) do { \ + (regs)->psw.addr = (__ip); \ + (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ + offsetof(struct stack_frame, back_chain); \ +} while (0) + +#endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/pfault.h b/arch/s390/include/asm/pfault.h new file mode 100644 index 000000000..a1bee4a1e --- /dev/null +++ b/arch/s390/include/asm/pfault.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
1999, 2023 + */ +#ifndef _ASM_S390_PFAULT_H +#define _ASM_S390_PFAULT_H + +#include + +int __pfault_init(void); +void __pfault_fini(void); + +static inline int pfault_init(void) +{ + if (IS_ENABLED(CONFIG_PFAULT)) + return __pfault_init(); + return -EOPNOTSUPP; +} + +static inline void pfault_fini(void) +{ + if (IS_ENABLED(CONFIG_PFAULT)) + __pfault_fini(); +} + +#endif /* _ASM_S390_PFAULT_H */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h new file mode 100644 index 000000000..376b4b23b --- /dev/null +++ b/arch/s390/include/asm/pgalloc.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + * Author(s): Hartmut Penner (hp@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/pgalloc.h" + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef _S390_PGALLOC_H +#define _S390_PGALLOC_H + +#include +#include +#include +#include + +#define CRST_ALLOC_ORDER 2 + +unsigned long *crst_table_alloc(struct mm_struct *); +void crst_table_free(struct mm_struct *, unsigned long *); + +unsigned long *page_table_alloc(struct mm_struct *); +struct page *page_table_alloc_pgste(struct mm_struct *mm); +void page_table_free(struct mm_struct *, unsigned long *); +void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); +void page_table_free_pgste(struct page *page); +extern int page_table_allocate_pgste; + +static inline void crst_table_init(unsigned long *crst, unsigned long entry) +{ + memset64((u64 *)crst, entry, _CRST_ENTRIES); +} + +int crst_table_upgrade(struct mm_struct *mm, unsigned long limit); + +static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr, + unsigned long len) +{ + int rc; + + if (addr + len > mm->context.asce_limit && + addr + len <= TASK_SIZE) { + rc = crst_table_upgrade(mm, addr + len); + if (rc) + return (unsigned long) rc; + } + return addr; +} + +static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) +{ + unsigned long *table = crst_table_alloc(mm); + + if (table) + crst_table_init(table, _REGION2_ENTRY_EMPTY); + return (p4d_t *) table; +} + +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + crst_table_free(mm, (unsigned long *) p4d); +} + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) +{ + unsigned long *table = crst_table_alloc(mm); + if (table) + crst_table_init(table, _REGION3_ENTRY_EMPTY); + return (pud_t *) table; +} + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + if (!mm_pud_folded(mm)) + crst_table_free(mm, (unsigned long *) pud); +} + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) +{ + unsigned long *table = crst_table_alloc(mm); + + if (!table) + return NULL; + crst_table_init(table, _SEGMENT_ENTRY_EMPTY); + if (!pagetable_pmd_ctor(virt_to_ptdesc(table))) { + crst_table_free(mm, table); + return NULL; + } + return (pmd_t *) table; +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + if (mm_pmd_folded(mm)) + return; + pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + crst_table_free(mm, (unsigned long *) pmd); +} + +static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) +{ + set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d))); +} + +static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud))); +} + +static inline void 
pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd))); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return (pgd_t *) crst_table_alloc(mm); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + crst_table_free(mm, (unsigned long *) pgd); +} + +static inline void pmd_populate(struct mm_struct *mm, + pmd_t *pmd, pgtable_t pte) +{ + set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte))); +} + +#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte) + +/* + * page table entry allocation/free routines. + */ +#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm)) +#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm)) + +#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) +#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) + +/* arch use pte_free_defer() implementation in arch/s390/mm/pgalloc.c */ +#define pte_free_defer pte_free_defer +void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable); + +void vmem_map_init(void); +void *vmem_crst_alloc(unsigned long val); +pte_t *vmem_pte_alloc(void); + +unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages); +void base_asce_free(unsigned long asce); + +#endif /* _S390_PGALLOC_H */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h new file mode 100644 index 000000000..fb3ee7758 --- /dev/null +++ b/arch/s390/include/asm/pgtable.h @@ -0,0 +1,1872 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + * Author(s): Hartmut Penner (hp@de.ibm.com) + * Ulrich Weigand (weigand@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/pgtable.h" + */ + +#ifndef _ASM_S390_PGTABLE_H +#define _ASM_S390_PGTABLE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern pgd_t swapper_pg_dir[]; +extern pgd_t invalid_pg_dir[]; +extern void paging_init(void); +extern unsigned long s390_invalid_asce; + +enum { + PG_DIRECT_MAP_4K = 0, + PG_DIRECT_MAP_1M, + PG_DIRECT_MAP_2G, + PG_DIRECT_MAP_MAX +}; + +extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]); + +static inline void update_page_count(int level, long count) +{ + if (IS_ENABLED(CONFIG_PROC_FS)) + atomic_long_add(count, &direct_pages_count[level]); +} + +/* + * The S390 doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +#define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0) +#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) + +/* + * ZERO_PAGE is a global shared page that is always zero; used + * for zero-mapped memory areas etc.. + */ + +extern unsigned long empty_zero_page; +extern unsigned long zero_page_mask; + +#define ZERO_PAGE(vaddr) \ + (virt_to_page((void *)(empty_zero_page + \ + (((unsigned long)(vaddr)) &zero_page_mask)))) +#define __HAVE_COLOR_ZERO_PAGE + +/* TODO: s390 cannot support io_remap_pfn_range... 
*/
+#define pte_ERROR(e) \
+	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#define p4d_ERROR(e) \
+	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
+#define pgd_ERROR(e) \
+	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The vmalloc and module area will always be on the topmost area of the
+ * kernel mapping. 512GB are reserved for vmalloc by default.
+ * At the top of the vmalloc area a 2GB area is reserved where modules
+ * will reside. That makes sure that inter-module branches always
+ * happen without trampolines and in addition the placement within a
+ * 2GB frame is branch prediction unit friendly.
+ */
+extern unsigned long __bootdata_preserved(VMALLOC_START);
+extern unsigned long __bootdata_preserved(VMALLOC_END);
+#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
+extern struct page *__bootdata_preserved(vmemmap);
+extern unsigned long __bootdata_preserved(vmemmap_size);
+
+extern unsigned long __bootdata_preserved(MODULES_VADDR);
+extern unsigned long __bootdata_preserved(MODULES_END);
+#define MODULES_VADDR	MODULES_VADDR
+#define MODULES_END	MODULES_END
+#define MODULES_LEN	(1UL << 31)
+
+static inline int is_module_addr(void *addr)
+{
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+	return 1;
+}
+
+/*
+ * A 64 bit page table entry of S390 has the following format:
+ * |			 PFRA			      |0IPC|  OS  |
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Page-Invalid Bit:    Page is not available for address-translation
+ * P Page-Protection Bit: Store access not possible for page
+ * C Change-bit override: HW is not required to set change bit
+ *
+ * A 64 bit segment table entry of S390 has the following format:
+ * |	    P-table origin			      |	 TT
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Segment-Invalid Bit:    Segment is not available for address-translation
+ * C Common-Segment Bit:     Segment is not private (PoP 3-30)
+ * P Page-Protection Bit: Store access not possible for page
+ * TT Type 00
+ *
+ * A 64 bit region table entry of S390 has the following format:
+ * |	    S-table origin			     |	 TF  TTTL
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Segment-Invalid Bit:    Segment is not available for address-translation
+ * TT Type 01
+ * TF
+ * TL Table length
+ *
+ * The 64 bit region table origin of S390 has the following format:
+ * |      region table origin			       |       DTTL
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * X Space-Switch event:
+ * G Segment-Invalid Bit:
+ * P Private-Space Bit:
+ * S Storage-Alteration:
+ * R Real space
+ * TL Table-Length:
+ *
+ * A storage key has the following format:
+ * | ACC |F|R|C|0|
+ *  0   3  4 5 6    7
+ * ACC: access key
+ * F  : fetch protection bit
+ * R  : referenced bit
+ * C  : changed bit
+ */
+
+/* Hardware bits in the page table entry */
+#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
+#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
+#define 
_PAGE_INVALID 0x400 /* HW invalid bit */ +#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ + +/* Software bits in the page table entry */ +#define _PAGE_PRESENT 0x001 /* SW pte present bit */ +#define _PAGE_YOUNG 0x004 /* SW pte young bit */ +#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ +#define _PAGE_READ 0x010 /* SW pte read bit */ +#define _PAGE_WRITE 0x020 /* SW pte write bit */ +#define _PAGE_SPECIAL 0x040 /* SW associated with special page */ +#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ +#else +#define _PAGE_SOFT_DIRTY 0x000 +#endif + +#define _PAGE_SW_BITS 0xffUL /* All SW bits */ + +#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */ + +/* Set of bits not changed in pte_modify */ +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ + _PAGE_YOUNG | _PAGE_SOFT_DIRTY) + +/* + * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT + * HW bit and all SW bits. + */ +#define _PAGE_RDP_MASK ~(_PAGE_PROTECT | _PAGE_SW_BITS) + +/* + * handle_pte_fault uses pte_present and pte_none to find out the pte type + * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to + * distinguish present from not-present ptes. It is changed only with the page + * table lock held. + * + * The following table gives the different possible bit combinations for + * the pte hardware and software bits in the last 12 bits of a pte + * (. unassigned bit, x don't care, t swap type): + * + * 842100000000 + * 000084210000 + * 000000008421 + * .IR.uswrdy.p + * empty .10.00000000 + * swap .11..ttttt.0 + * prot-none, clean, old .11.xx0000.1 + * prot-none, clean, young .11.xx0001.1 + * prot-none, dirty, old .11.xx0010.1 + * prot-none, dirty, young .11.xx0011.1 + * read-only, clean, old .11.xx0100.1 + * read-only, clean, young .01.xx0101.1 + * read-only, dirty, old .11.xx0110.1 + * read-only, dirty, young .01.xx0111.1 + * read-write, clean, old .11.xx1100.1 + * read-write, clean, young .01.xx1101.1 + * read-write, dirty, old .10.xx1110.1 + * read-write, dirty, young .00.xx1111.1 + * HW-bits: R read-only, I invalid + * SW-bits: p present, y young, d dirty, r read, w write, s special, + * u unused, l large + * + * pte_none is true for the bit pattern .10.00000000, pte == 0x400 + * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200 + * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 + */ + +/* Bits in the segment/region table address-space-control-element */ +#define _ASCE_ORIGIN ~0xfffUL/* region/segment table origin */ +#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ +#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ +#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ +#define _ASCE_REAL_SPACE 0x20 /* real space control */ +#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ +#define _ASCE_TYPE_REGION1 0x0c /* region first table type */ +#define _ASCE_TYPE_REGION2 0x08 /* region second table type */ +#define _ASCE_TYPE_REGION3 0x04 /* region third table type */ +#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ +#define _ASCE_TABLE_LENGTH 0x03 /* region table length */ + +/* Bits in the region table entry */ +#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ +#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ +#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */ +#define _REGION_ENTRY_OFFSET 0xc0 
/* region table offset */ +#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ +#define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */ +#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ +#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ +#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ +#define _REGION_ENTRY_LENGTH 0x03 /* region third length */ + +#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) +#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) +#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) +#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) +#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) +#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) + +#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ +#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ +#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ +#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ +#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */ +#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */ +#else +#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ +#endif + +#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL + +/* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL +#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL +#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ +#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */ +#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */ +#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */ +#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ +#define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */ + +#define _SEGMENT_ENTRY (0) +#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) + +#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ +#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ +#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ +#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */ +#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ +#else +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ +#endif + +#define _CRST_ENTRIES 2048 /* number of region/segment table entries */ +#define _PAGE_ENTRIES 256 /* number of page table entries */ + +#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8) +#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8) + +#define _REGION1_SHIFT 53 +#define _REGION2_SHIFT 42 +#define _REGION3_SHIFT 31 +#define _SEGMENT_SHIFT 20 + +#define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT) +#define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT) +#define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT) +#define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT) +#define _PAGE_INDEX (0xffUL << _PAGE_SHIFT) + +#define _REGION1_SIZE (1UL << _REGION1_SHIFT) +#define _REGION2_SIZE (1UL << _REGION2_SHIFT) +#define _REGION3_SIZE (1UL << _REGION3_SHIFT) +#define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT) + +#define 
_REGION1_MASK (~(_REGION1_SIZE - 1)) +#define _REGION2_MASK (~(_REGION2_SIZE - 1)) +#define _REGION3_MASK (~(_REGION3_SIZE - 1)) +#define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1)) + +#define PMD_SHIFT _SEGMENT_SHIFT +#define PUD_SHIFT _REGION3_SHIFT +#define P4D_SHIFT _REGION2_SHIFT +#define PGDIR_SHIFT _REGION1_SHIFT + +#define PMD_SIZE _SEGMENT_SIZE +#define PUD_SIZE _REGION3_SIZE +#define P4D_SIZE _REGION2_SIZE +#define PGDIR_SIZE _REGION1_SIZE + +#define PMD_MASK _SEGMENT_MASK +#define PUD_MASK _REGION3_MASK +#define P4D_MASK _REGION2_MASK +#define PGDIR_MASK _REGION1_MASK + +#define PTRS_PER_PTE _PAGE_ENTRIES +#define PTRS_PER_PMD _CRST_ENTRIES +#define PTRS_PER_PUD _CRST_ENTRIES +#define PTRS_PER_P4D _CRST_ENTRIES +#define PTRS_PER_PGD _CRST_ENTRIES + +/* + * Segment table and region3 table entry encoding + * (R = read-only, I = invalid, y = young bit): + * dy..R...I...wr + * prot-none, clean, old 00..1...1...00 + * prot-none, clean, young 01..1...1...00 + * prot-none, dirty, old 10..1...1...00 + * prot-none, dirty, young 11..1...1...00 + * read-only, clean, old 00..1...1...01 + * read-only, clean, young 01..1...0...01 + * read-only, dirty, old 10..1...1...01 + * read-only, dirty, young 11..1...0...01 + * read-write, clean, old 00..1...1...11 + * read-write, clean, young 01..1...0...11 + * read-write, dirty, old 10..0...1...11 + * read-write, dirty, young 11..0...0...11 + * The segment table origin is used to distinguish empty (origin==0) from + * read-write, old segment table entries (origin!=0) + * HW-bits: R read-only, I invalid + * SW-bits: y young, d dirty, r read, w write + */ + +/* Page status table bits for virtualization */ +#define PGSTE_ACC_BITS 0xf000000000000000UL +#define PGSTE_FP_BIT 0x0800000000000000UL +#define PGSTE_PCL_BIT 0x0080000000000000UL +#define PGSTE_HR_BIT 0x0040000000000000UL +#define PGSTE_HC_BIT 0x0020000000000000UL +#define PGSTE_GR_BIT 0x0004000000000000UL +#define PGSTE_GC_BIT 0x0002000000000000UL +#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ +#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ +#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */ + +/* Guest Page State used for virtualization */ +#define _PGSTE_GPS_ZERO 0x0000000080000000UL +#define _PGSTE_GPS_NODAT 0x0000000040000000UL +#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL +#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL +#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL +#define _PGSTE_GPS_USAGE_POT_VOLATILE 0x0000000002000000UL +#define _PGSTE_GPS_USAGE_VOLATILE _PGSTE_GPS_USAGE_MASK + +/* + * A user page table pointer has the space-switch-event bit, the + * private-space-control bit and the storage-alteration-event-control + * bit set. A kernel page table pointer doesn't need them. + */ +#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ + _ASCE_ALT_EVENT) + +/* + * Page protection definitions. 
+ */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) +#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) +#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_INVALID | _PAGE_PROTECT) +#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) +#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_INVALID | _PAGE_PROTECT) + +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ + _PAGE_PROTECT | _PAGE_NOEXEC) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_YOUNG | _PAGE_DIRTY) + +/* + * On s390 the page table entry has an invalid bit and a read-only bit. + * Read permission implies execute permission and write permission + * implies read permission. + */ + /*xwr*/ + +/* + * Segment entry (large page) protection definitions. + */ +#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ + _SEGMENT_ENTRY_PROTECT) +#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \ + _SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_NOEXEC) +#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \ + _SEGMENT_ENTRY_READ) +#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_WRITE | \ + _SEGMENT_ENTRY_NOEXEC) +#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_WRITE) +#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \ + _SEGMENT_ENTRY_LARGE | \ + _SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_WRITE | \ + _SEGMENT_ENTRY_YOUNG | \ + _SEGMENT_ENTRY_DIRTY | \ + _SEGMENT_ENTRY_NOEXEC) +#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \ + _SEGMENT_ENTRY_LARGE | \ + _SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_YOUNG | \ + _SEGMENT_ENTRY_PROTECT | \ + _SEGMENT_ENTRY_NOEXEC) +#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \ + _SEGMENT_ENTRY_LARGE | \ + _SEGMENT_ENTRY_READ | \ + _SEGMENT_ENTRY_WRITE | \ + _SEGMENT_ENTRY_YOUNG | \ + _SEGMENT_ENTRY_DIRTY) + +/* + * Region3 entry (large page) protection definitions. 
+ */ + +#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_LARGE | \ + _REGION3_ENTRY_READ | \ + _REGION3_ENTRY_WRITE | \ + _REGION3_ENTRY_YOUNG | \ + _REGION3_ENTRY_DIRTY | \ + _REGION_ENTRY_NOEXEC) +#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_LARGE | \ + _REGION3_ENTRY_READ | \ + _REGION3_ENTRY_YOUNG | \ + _REGION_ENTRY_PROTECT | \ + _REGION_ENTRY_NOEXEC) +#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_LARGE | \ + _REGION3_ENTRY_READ | \ + _REGION3_ENTRY_WRITE | \ + _REGION3_ENTRY_YOUNG | \ + _REGION3_ENTRY_DIRTY) + +static inline bool mm_p4d_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION1_SIZE; +} +#define mm_p4d_folded(mm) mm_p4d_folded(mm) + +static inline bool mm_pud_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION2_SIZE; +} +#define mm_pud_folded(mm) mm_pud_folded(mm) + +static inline bool mm_pmd_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION3_SIZE; +} +#define mm_pmd_folded(mm) mm_pmd_folded(mm) + +static inline int mm_has_pgste(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (unlikely(mm->context.has_pgste)) + return 1; +#endif + return 0; +} + +static inline int mm_is_protected(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (unlikely(atomic_read(&mm->context.protected_count))) + return 1; +#endif + return 0; +} + +static inline int mm_alloc_pgste(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (unlikely(mm->context.alloc_pgste)) + return 1; +#endif + return 0; +} + +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) +{ + return __pte(pte_val(pte) & ~pgprot_val(prot)); +} + +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) +{ + return __pte(pte_val(pte) | pgprot_val(prot)); +} + +static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + return __pmd(pmd_val(pmd) & ~pgprot_val(prot)); +} + +static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + return __pmd(pmd_val(pmd) | pgprot_val(prot)); +} + +static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot) +{ + return __pud(pud_val(pud) & ~pgprot_val(prot)); +} + +static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot) +{ + return __pud(pud_val(pud) | pgprot_val(prot)); +} + +/* + * In the case that a guest uses storage keys + * faults should no longer be backed by zero pages + */ +#define mm_forbids_zeropage mm_has_pgste +static inline int mm_uses_skeys(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (mm->context.uses_skeys) + return 1; +#endif + return 0; +} + +static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) +{ + union register_pair r1 = { .even = old, .odd = new, }; + unsigned long address = (unsigned long)ptr | 1; + + asm volatile( + " csp %[r1],%[address]" + : [r1] "+&d" (r1.pair), "+m" (*ptr) + : [address] "d" (address) + : "cc"); +} + +static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new) +{ + union register_pair r1 = { .even = old, .odd = new, }; + unsigned long address = (unsigned long)ptr | 1; + + asm volatile( + " cspg %[r1],%[address]" + : [r1] "+&d" (r1.pair), "+m" (*ptr) + : [address] "d" (address) + : "cc"); +} + +#define CRDTE_DTT_PAGE 0x00UL +#define CRDTE_DTT_SEGMENT 0x10UL +#define CRDTE_DTT_REGION3 0x14UL +#define CRDTE_DTT_REGION2 0x18UL +#define CRDTE_DTT_REGION1 0x1cUL + +static inline void crdte(unsigned long old, unsigned long new, + unsigned long *table, unsigned long dtt, + unsigned long address, unsigned long asce) +{ + union 
register_pair r1 = { .even = old, .odd = new, }; + union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, }; + + asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0" + : [r1] "+&d" (r1.pair) + : [r2] "d" (r2.pair), [asce] "a" (asce) + : "memory", "cc"); +} + +/* + * pgd/p4d/pud/pmd/pte query functions + */ +static inline int pgd_folded(pgd_t pgd) +{ + return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1; +} + +static inline int pgd_present(pgd_t pgd) +{ + if (pgd_folded(pgd)) + return 1; + return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; +} + +static inline int pgd_none(pgd_t pgd) +{ + if (pgd_folded(pgd)) + return 0; + return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; +} + +static inline int pgd_bad(pgd_t pgd) +{ + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1) + return 0; + return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0; +} + +static inline unsigned long pgd_pfn(pgd_t pgd) +{ + unsigned long origin_mask; + + origin_mask = _REGION_ENTRY_ORIGIN; + return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT; +} + +static inline int p4d_folded(p4d_t p4d) +{ + return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2; +} + +static inline int p4d_present(p4d_t p4d) +{ + if (p4d_folded(p4d)) + return 1; + return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL; +} + +static inline int p4d_none(p4d_t p4d) +{ + if (p4d_folded(p4d)) + return 0; + return p4d_val(p4d) == _REGION2_ENTRY_EMPTY; +} + +static inline unsigned long p4d_pfn(p4d_t p4d) +{ + unsigned long origin_mask; + + origin_mask = _REGION_ENTRY_ORIGIN; + return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT; +} + +static inline int pud_folded(pud_t pud) +{ + return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3; +} + +static inline int pud_present(pud_t pud) +{ + if (pud_folded(pud)) + return 1; + return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; +} + +static inline int pud_none(pud_t pud) +{ + if (pud_folded(pud)) + return 0; + return pud_val(pud) == _REGION3_ENTRY_EMPTY; +} + +#define pud_leaf pud_large +static inline int pud_large(pud_t pud) +{ + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) + return 0; + return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); +} + +#define pmd_leaf pmd_large +static inline int pmd_large(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; +} + +static inline int pmd_bad(pmd_t pmd) +{ + if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd)) + return 1; + return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; +} + +static inline int pud_bad(pud_t pud) +{ + unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK; + + if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud)) + return 1; + if (type < _REGION_ENTRY_TYPE_R3) + return 0; + return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; +} + +static inline int p4d_bad(p4d_t p4d) +{ + unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK; + + if (type > _REGION_ENTRY_TYPE_R2) + return 1; + if (type < _REGION_ENTRY_TYPE_R2) + return 0; + return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; +} + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; +} + +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; +} + +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; +} + +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 
0; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; +} + +static inline int pte_present(pte_t pte) +{ + /* Bit pattern: (pte & 0x001) == 0x001 */ + return (pte_val(pte) & _PAGE_PRESENT) != 0; +} + +static inline int pte_none(pte_t pte) +{ + /* Bit pattern: pte == 0x400 */ + return pte_val(pte) == _PAGE_INVALID; +} + +static inline int pte_swap(pte_t pte) +{ + /* Bit pattern: (pte & 0x201) == 0x200 */ + return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) + == _PAGE_PROTECT; +} + +static inline int pte_special(pte_t pte) +{ + return (pte_val(pte) & _PAGE_SPECIAL); +} + +#define __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t a, pte_t b) +{ + return pte_val(a) == pte_val(b); +} + +#ifdef CONFIG_NUMA_BALANCING +static inline int pte_protnone(pte_t pte) +{ + return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); +} + +static inline int pmd_protnone(pmd_t pmd) +{ + /* pmd_large(pmd) implies pmd_present(pmd) */ + return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ); +} +#endif + +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); +} + +static inline int pte_soft_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_SOFT_DIRTY; +} +#define pte_swp_soft_dirty pte_soft_dirty + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); +} +#define pte_swp_mksoft_dirty pte_mksoft_dirty + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); +} +#define pte_swp_clear_soft_dirty pte_clear_soft_dirty + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); +} + +/* + * query functions pte_write/pte_dirty/pte_young only work if + * pte_present() is true. Undefined behaviour if not.. + */ +static inline int pte_write(pte_t pte) +{ + return (pte_val(pte) & _PAGE_WRITE) != 0; +} + +static inline int pte_dirty(pte_t pte) +{ + return (pte_val(pte) & _PAGE_DIRTY) != 0; +} + +static inline int pte_young(pte_t pte) +{ + return (pte_val(pte) & _PAGE_YOUNG) != 0; +} + +#define __HAVE_ARCH_PTE_UNUSED +static inline int pte_unused(pte_t pte) +{ + return pte_val(pte) & _PAGE_UNUSED; +} + +/* + * Extract the pgprot value from the given pte while at the same time making it + * usable for kernel address space mappings where fault driven dirty and + * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID + * must not be set. 
+ */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK; + + if (pte_write(pte)) + pte_flags |= pgprot_val(PAGE_KERNEL); + else + pte_flags |= pgprot_val(PAGE_KERNEL_RO); + pte_flags |= pte_val(pte) & mio_wb_bit_mask; + + return __pgprot(pte_flags); +} + +/* + * pgd/pmd/pte modification functions + */ + +static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + WRITE_ONCE(*pgdp, pgd); +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + WRITE_ONCE(*p4dp, p4d); +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + WRITE_ONCE(*pudp, pud); +} + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + WRITE_ONCE(*pmdp, pmd); +} + +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + WRITE_ONCE(*ptep, pte); +} + +static inline void pgd_clear(pgd_t *pgd) +{ + if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) + set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY)); +} + +static inline void p4d_clear(p4d_t *p4d) +{ + if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) + set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY)); +} + +static inline void pud_clear(pud_t *pud) +{ + if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) + set_pud(pud, __pud(_REGION3_ENTRY_EMPTY)); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + set_pte(ptep, __pte(_PAGE_INVALID)); +} + +/* + * The following pte modification functions only work if + * pte_present() is true. Undefined behaviour if not. + */ +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK)); + pte = set_pte_bit(pte, newprot); + /* + * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX + * has the invalid bit set, clear it again for readable, young pages + */ + if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); + /* + * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page + * protection bit set, clear it again for writable, dirty pages + */ + if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE)); + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE)); + if (pte_val(pte) & _PAGE_DIRTY) + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY)); + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY)); + if (pte_val(pte) & _PAGE_WRITE) + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG)); + return set_pte_bit(pte, __pgprot(_PAGE_INVALID)); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG)); + if (pte_val(pte) & _PAGE_READ) + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); + return pte; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL)); +} + 
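+/*
+ * Editor's sketch (not part of the original header): dirty and young
+ * tracking on s390 is fault driven, so the mk* helpers above always pair
+ * a software bit with the hardware bit that forces the next fault:
+ *
+ *	pte = pte_mkold(pte);	// clears _PAGE_YOUNG, sets _PAGE_INVALID
+ *	pte = pte_mkyoung(pte);	// sets _PAGE_YOUNG, clears _PAGE_INVALID
+ *				// again (only for readable ptes)
+ *	pte = pte_mkclean(pte);	// clears _PAGE_DIRTY, sets _PAGE_PROTECT
+ *	pte = pte_mkdirty(pte);	// sets _PAGE_DIRTY, clears _PAGE_PROTECT
+ *				// again (only for writable ptes)
+ */
+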
+#ifdef CONFIG_HUGETLB_PAGE +static inline pte_t pte_mkhuge(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(_PAGE_LARGE)); +} +#endif + +#define IPTE_GLOBAL 0 +#define IPTE_LOCAL 1 + +#define IPTE_NODAT 0x400 +#define IPTE_GUEST_ASCE 0x800 + +static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, + unsigned long opt, unsigned long asce, + int local) +{ + unsigned long pto; + + pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1); + asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]" + : "+m" (*ptep) + : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt), + [asce] "a" (asce), [m4] "i" (local)); +} + +static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, + unsigned long opt, unsigned long asce, + int local) +{ + unsigned long pto = __pa(ptep); + + if (__builtin_constant_p(opt) && opt == 0) { + /* Invalidation + TLB flush for the pte */ + asm volatile( + " ipte %[r1],%[r2],0,%[m4]" + : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), + [m4] "i" (local)); + return; + } + + /* Invalidate ptes with options + TLB flush of the ptes */ + opt = opt | (asce & _ASCE_ORIGIN); + asm volatile( + " ipte %[r1],%[r2],%[r3],%[m4]" + : [r2] "+a" (address), [r3] "+a" (opt) + : [r1] "a" (pto), [m4] "i" (local) : "memory"); +} + +static __always_inline void __ptep_ipte_range(unsigned long address, int nr, + pte_t *ptep, int local) +{ + unsigned long pto = __pa(ptep); + + /* Invalidate a range of ptes + TLB flush of the ptes */ + do { + asm volatile( + " ipte %[r1],%[r2],%[r3],%[m4]" + : [r2] "+a" (address), [r3] "+a" (nr) + : [r1] "a" (pto), [m4] "i" (local) : "memory"); + } while (nr != 255); +} + +/* + * This is hard to understand. ptep_get_and_clear and ptep_clear_flush + * both clear the TLB for the unmapped pte. The reason is that + * ptep_get_and_clear is used in common code (e.g. change_pte_range) + * to modify an active pte. The sequence is + * 1) ptep_get_and_clear + * 2) set_pte_at + * 3) flush_tlb_range + * On s390 the tlb needs to get flushed with the modification of the pte + * if the pte is active. The only way how this can be implemented is to + * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range + * is a nop. 
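+ *
+ * Editor's illustration of that sequence, with the s390 behaviour of
+ * each step noted (the function bodies follow below):
+ *
+ *	old = ptep_get_and_clear(mm, addr, ptep);  // IPTE: pte + TLB flush
+ *	set_pte_at(mm, addr, ptep, new);           // install the new pte
+ *	flush_tlb_range(vma, start, end);          // nop on s390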
+ */ +pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t); +pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t); + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t pte = *ptep; + + pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte)); + return pte_young(pte); +} + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + return ptep_test_and_clear_young(vma, address, ptep); +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t res; + + res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); + /* At this point the reference through the mapping is still present */ + if (mm_is_protected(mm) && pte_present(res)) + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + return res; +} + +#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *); +void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, + pte_t *, pte_t, pte_t); + +#define __HAVE_ARCH_PTEP_CLEAR_FLUSH +static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t res; + + res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); + /* At this point the reference through the mapping is still present */ + if (mm_is_protected(vma->vm_mm) && pte_present(res)) + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + return res; +} + +/* + * The batched pte unmap code uses ptep_get_and_clear_full to clear the + * ptes. Here an optimization is possible. tlb_gather_mmu flushes all + * tlbs of an mm if it can guarantee that the ptes of the mm_struct + * cannot be accessed while the batched unmap is running. In this case + * full==1 and a simple pte_clear is enough. See tlb.h. + */ +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, int full) +{ + pte_t res; + + if (full) { + res = *ptep; + set_pte(ptep, __pte(_PAGE_INVALID)); + } else { + res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); + } + /* Nothing to do */ + if (!mm_is_protected(mm) || !pte_present(res)) + return res; + /* + * At this point the reference through the mapping is still present. + * The notifier should have destroyed all protected vCPUs at this + * point, so the destroy should be successful. + */ + if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK)) + return res; + /* + * If something went wrong and the page could not be destroyed, or + * if this is not a mm teardown, the slower export is used as + * fallback instead. + */ + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + return res; +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t pte = *ptep; + + if (pte_write(pte)) + ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte)); +} + +/* + * Check if PTEs only differ in _PAGE_PROTECT HW bit, but also allow SW PTE + * bits in the comparison. Those might change e.g. because of dirty and young + * tracking. 
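+ *
+ * Editor's note: in other words, only a plain RO -> RW upgrade
+ * qualifies; a pte that also changed e.g. its page frame differs in
+ * _PAGE_RDP_MASK and falls back to the regular IPTE path.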
+ */ +static inline int pte_allow_rdp(pte_t old, pte_t new) +{ + /* + * Only allow changes from RO to RW + */ + if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT) + return 0; + + return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK); +} + +static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + /* + * RDP might not have propagated the PTE protection reset to all CPUs, + * so there could be spurious TLB protection faults. + * NOTE: This will also be called when a racing pagetable update on + * another thread already installed the correct PTE. Both cases cannot + * really be distinguished. + * Therefore, only do the local TLB flush when RDP can be used, and the + * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead. + * A local RDP can be used to do the flush. + */ + if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT)) + __ptep_rdp(address, ptep, 0, 0, 1); +} +#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault + +void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t new); + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +static inline int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty) +{ + if (pte_same(*ptep, entry)) + return 0; + if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) + ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry); + else + ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); + return 1; +} + +/* + * Additional functions to handle KVM guest page tables + */ +void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t entry); +void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +void ptep_notify(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long bits); +int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, + pte_t *ptep, int prot, unsigned long bit); +void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, + pte_t *ptep , int reset); +void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, + pte_t *sptep, pte_t *tptep, pte_t pte); +void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); + +bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address, + pte_t *ptep); +int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, + unsigned char key, bool nq); +int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, + unsigned char key, unsigned char *oldkey, + bool nq, bool mr, bool mc); +int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr); +int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, + unsigned char *key); + +int set_pgste_bits(struct mm_struct *mm, unsigned long addr, + unsigned long bits, unsigned long value); +int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); +int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, + unsigned long *oldpte, unsigned long *oldpgste); +void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); +void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); +void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); +void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); + +#define pgprot_writecombine pgprot_writecombine +pgprot_t 
pgprot_writecombine(pgprot_t prot); + +#define pgprot_writethrough pgprot_writethrough +pgprot_t pgprot_writethrough(pgprot_t prot); + +/* + * Set multiple PTEs to consecutive pages with a single call. All PTEs + * are within the same folio, PMD and VMA. + */ +static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t entry, unsigned int nr) +{ + if (pte_present(entry)) + entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED)); + if (mm_has_pgste(mm)) { + for (;;) { + ptep_set_pte_at(mm, addr, ptep, entry); + if (--nr == 0) + break; + ptep++; + entry = __pte(pte_val(entry) + PAGE_SIZE); + addr += PAGE_SIZE; + } + } else { + for (;;) { + set_pte(ptep, entry); + if (--nr == 0) + break; + ptep++; + entry = __pte(pte_val(entry) + PAGE_SIZE); + } + } +} +#define set_ptes set_ptes + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) +{ + pte_t __pte; + + __pte = __pte(physpage | pgprot_val(pgprot)); + if (!MACHINE_HAS_NX) + __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC)); + return pte_mkyoung(__pte); +} + +static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) +{ + unsigned long physpage = page_to_phys(page); + pte_t __pte = mk_pte_phys(physpage, pgprot); + + if (pte_write(__pte) && PageDirty(page)) + __pte = pte_mkdirty(__pte); + return __pte; +} + +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) +#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) +#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN)) +#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN)) + +static inline unsigned long pmd_deref(pmd_t pmd) +{ + unsigned long origin_mask; + + origin_mask = _SEGMENT_ENTRY_ORIGIN; + if (pmd_large(pmd)) + origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; + return (unsigned long)__va(pmd_val(pmd) & origin_mask); +} + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return __pa(pmd_deref(pmd)) >> PAGE_SHIFT; +} + +static inline unsigned long pud_deref(pud_t pud) +{ + unsigned long origin_mask; + + origin_mask = _REGION_ENTRY_ORIGIN; + if (pud_large(pud)) + origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; + return (unsigned long)__va(pud_val(pud) & origin_mask); +} + +static inline unsigned long pud_pfn(pud_t pud) +{ + return __pa(pud_deref(pud)) >> PAGE_SHIFT; +} + +/* + * The pgd_offset function *always* adds the index for the top-level + * region/segment table. This is done to get a sequence like the + * following to work: + * pgdp = pgd_offset(current->mm, addr); + * pgd = READ_ONCE(*pgdp); + * p4dp = p4d_offset(&pgd, addr); + * ... + * The subsequent p4d_offset, pud_offset and pmd_offset functions + * only add an index if they dereferenced the pointer. 
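+ *
+ * Editor's completion of the sketch above, continuing the walk down to
+ * the pmd level with the same READ_ONCE() pattern:
+ *	p4d = READ_ONCE(*p4dp);
+ *	pudp = pud_offset(&p4d, addr);
+ *	pud = READ_ONCE(*pudp);
+ *	pmdp = pmd_offset(&pud, addr);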
+ */ +static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address) +{ + unsigned long rste; + unsigned int shift; + + /* Get the first entry of the top level table */ + rste = pgd_val(*pgd); + /* Pick up the shift from the table type of the first entry */ + shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20; + return pgd + ((address >> shift) & (PTRS_PER_PGD - 1)); +} + +#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address) + +static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address) +{ + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) + return (p4d_t *) pgd_deref(pgd) + p4d_index(address); + return (p4d_t *) pgdp; +} +#define p4d_offset_lockless p4d_offset_lockless + +static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address) +{ + return p4d_offset_lockless(pgdp, *pgdp, address); +} + +static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address) +{ + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) + return (pud_t *) p4d_deref(p4d) + pud_index(address); + return (pud_t *) p4dp; +} +#define pud_offset_lockless pud_offset_lockless + +static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address) +{ + return pud_offset_lockless(p4dp, *p4dp, address); +} +#define pud_offset pud_offset + +static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address) +{ + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) + return (pmd_t *) pud_deref(pud) + pmd_index(address); + return (pmd_t *) pudp; +} +#define pmd_offset_lockless pmd_offset_lockless + +static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address) +{ + return pmd_offset_lockless(pudp, *pudp, address); +} +#define pmd_offset pmd_offset + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long) pmd_deref(pmd); +} + +static inline bool gup_fast_permitted(unsigned long start, unsigned long end) +{ + return end <= current->mm->context.asce_limit; +} +#define gup_fast_permitted gup_fast_permitted + +#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot)) +#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) +#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); + if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); + return pmd; +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY)); + if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); + return pmd; +} + +static inline pud_t pud_wrprotect(pud_t pud) +{ + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); +} + +static inline pud_t pud_mkwrite(pud_t pud) +{ + pud = 
set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); + if (pud_val(pud) & _REGION3_ENTRY_DIRTY) + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); + return pud; +} + +static inline pud_t pud_mkclean(pud_t pud) +{ + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY)); + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); +} + +static inline pud_t pud_mkdirty(pud_t pud) +{ + pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY)); + if (pud_val(pud) & _REGION3_ENTRY_WRITE) + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); + return pud; +} + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) +static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) +{ + /* + * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX + * (see __Pxxx / __Sxxx). Convert to segment table entry format. + */ + if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) + return pgprot_val(SEGMENT_NONE); + if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) + return pgprot_val(SEGMENT_RO); + if (pgprot_val(pgprot) == pgprot_val(PAGE_RX)) + return pgprot_val(SEGMENT_RX); + if (pgprot_val(pgprot) == pgprot_val(PAGE_RW)) + return pgprot_val(SEGMENT_RW); + return pgprot_val(SEGMENT_RWX); +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); + if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + unsigned long mask; + + mask = _SEGMENT_ENTRY_ORIGIN_LARGE; + mask |= _SEGMENT_ENTRY_DIRTY; + mask |= _SEGMENT_ENTRY_YOUNG; + mask |= _SEGMENT_ENTRY_LARGE; + mask |= _SEGMENT_ENTRY_SOFT_DIRTY; + pmd = __pmd(pmd_val(pmd) & mask); + pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot))); + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); + return pmd; +} + +static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) +{ + return __pmd(physpage + massage_pgprot_pmd(pgprot)); +} + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ + +static inline void __pmdp_csp(pmd_t *pmdp) +{ + csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), + pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); +} + +#define IDTE_GLOBAL 0 +#define IDTE_LOCAL 1 + +#define IDTE_PTOA 0x0800 +#define IDTE_NODAT 0x1000 +#define IDTE_GUEST_ASCE 0x2000 + +static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, + unsigned long opt, unsigned long asce, + int local) +{ + unsigned long sto; + + sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t); + if (__builtin_constant_p(opt) && opt == 0) { + /* flush without guest asce */ + asm volatile( + " idte %[r1],0,%[r2],%[m4]" + : "+m" (*pmdp) + : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), + [m4] "i" (local) + : "cc" ); + } else { + /* flush with guest asce */ + asm volatile( + " idte %[r1],%[r3],%[r2],%[m4]" + : "+m" (*pmdp) + : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), + [r3] "a" (asce), [m4] "i" (local) + : "cc" ); + } +} + +static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp, + unsigned long opt, unsigned long asce, + int local) +{ + unsigned long r3o; + + r3o = __pa(pudp) 
- pud_index(addr) * sizeof(pud_t); + r3o |= _ASCE_TYPE_REGION3; + if (__builtin_constant_p(opt) && opt == 0) { + /* flush without guest asce */ + asm volatile( + " idte %[r1],0,%[r2],%[m4]" + : "+m" (*pudp) + : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), + [m4] "i" (local) + : "cc"); + } else { + /* flush with guest asce */ + asm volatile( + " idte %[r1],%[r3],%[r2],%[m4]" + : "+m" (*pudp) + : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), + [r3] "a" (asce), [m4] "i" (local) + : "cc" ); + } +} + +pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t); +pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t); +pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); + +#define __HAVE_ARCH_PGTABLE_WITHDRAW +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + VM_BUG_ON(addr & ~HPAGE_MASK); + + entry = pmd_mkyoung(entry); + if (dirty) + entry = pmd_mkdirty(entry); + if (pmd_val(*pmdp) == pmd_val(entry)) + return 0; + pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry); + return 1; +} + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd)); + return pmd_young(pmd); +} + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + VM_BUG_ON(addr & ~HPAGE_MASK); + return pmdp_test_and_clear_young(vma, addr, pmdp); +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t entry) +{ + if (!MACHINE_HAS_NX) + entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); + set_pmd(pmdp, entry); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE)); + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); +} + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); +} + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmdp, int full) +{ + if (full) { + pmd_t pmd = *pmdp; + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); + return pmd; + } + return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); +} + +#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); +} + +#define __HAVE_ARCH_PMDP_INVALIDATE +static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); + + return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct 
*mm, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + if (pmd_write(pmd)) + pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd)); +} + +static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); +} +#define pmdp_collapse_flush pmdp_collapse_flush + +#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot)) +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; +} + +#define has_transparent_hugepage has_transparent_hugepage +static inline int has_transparent_hugepage(void) +{ + return MACHINE_HAS_EDAT1 ? 1 : 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +/* + * 64 bit swap entry format: + * A page-table entry has some bits we have to treat in a special way. + * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte + * as invalid. + * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 + * | offset |E11XX|type |S0| + * |0000000000111111111122222222223333333333444444444455|55555|55566|66| + * |0123456789012345678901234567890123456789012345678901|23456|78901|23| + * + * Bits 0-51 store the offset. + * Bit 52 (E) is used to remember PG_anon_exclusive. + * Bits 57-61 store the type. + * Bit 62 (S) is used for softdirty tracking. + * Bits 55 and 56 (X) are unused. + */ + +#define __SWP_OFFSET_MASK ((1UL << 52) - 1) +#define __SWP_OFFSET_SHIFT 12 +#define __SWP_TYPE_MASK ((1UL << 5) - 1) +#define __SWP_TYPE_SHIFT 2 + +static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) +{ + unsigned long pteval; + + pteval = _PAGE_INVALID | _PAGE_PROTECT; + pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; + pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; + return __pte(pteval); +} + +static inline unsigned long __swp_type(swp_entry_t entry) +{ + return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; +} + +static inline unsigned long __swp_offset(swp_entry_t entry) +{ + return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; +} + +static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) +{ + return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; +} + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +extern int vmem_add_mapping(unsigned long start, unsigned long size); +extern void vmem_remove_mapping(unsigned long start, unsigned long size); +extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); +extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot); +extern void vmem_unmap_4k_page(unsigned long addr); +extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc); +extern int s390_enable_sie(void); +extern int s390_enable_skey(void); +extern void s390_reset_cmma(struct mm_struct *mm); + +/* s390 has a private copy of get unmapped area to deal with cache synonyms */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#define pmd_pgtable(pmd) \ + ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)) + +#endif /* _S390_PAGE_H */ diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h new file mode 100644 index 000000000..9e41a74fc --- /dev/null +++ b/arch/s390/include/asm/physmem_info.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+#ifndef _ASM_S390_MEM_DETECT_H +#define _ASM_S390_MEM_DETECT_H + +#include +#include + +enum physmem_info_source { + MEM_DETECT_NONE = 0, + MEM_DETECT_SCLP_STOR_INFO, + MEM_DETECT_DIAG260, + MEM_DETECT_SCLP_READ_INFO, + MEM_DETECT_BIN_SEARCH +}; + +struct physmem_range { + u64 start; + u64 end; +}; + +enum reserved_range_type { + RR_DECOMPRESSOR, + RR_INITRD, + RR_VMLINUX, + RR_AMODE31, + RR_IPLREPORT, + RR_CERT_COMP_LIST, + RR_MEM_DETECT_EXTENDED, + RR_VMEM, + RR_MAX +}; + +struct reserved_range { + unsigned long start; + unsigned long end; + struct reserved_range *chain; +}; + +/* + * Storage element id is defined as 1 byte (up to 256 storage elements). + * In practice only storage element ids 0 and 1 are used. + * According to the architecture, one storage element could have as many as + * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info. + * If more physmem_ranges are required, a block of memory from already + * known physmem_range is taken (online_extended points to it). + */ +#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */ + +struct physmem_info { + u32 range_count; + u8 info_source; + unsigned long usable; + struct reserved_range reserved[RR_MAX]; + struct physmem_range online[MEM_INLINED_ENTRIES]; + struct physmem_range *online_extended; +}; + +extern struct physmem_info physmem_info; + +void add_physmem_online_range(u64 start, u64 end); + +static inline int __get_physmem_range(u32 n, unsigned long *start, + unsigned long *end, bool respect_usable_limit) +{ + if (n >= physmem_info.range_count) { + *start = 0; + *end = 0; + return -1; + } + + if (n < MEM_INLINED_ENTRIES) { + *start = (unsigned long)physmem_info.online[n].start; + *end = (unsigned long)physmem_info.online[n].end; + } else { + *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start; + *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end; + } + + if (respect_usable_limit && physmem_info.usable) { + if (*start >= physmem_info.usable) + return -1; + if (*end > physmem_info.usable) + *end = physmem_info.usable; + } + return 0; +} + +/** + * for_each_physmem_usable_range - early online memory range iterator + * @i: an integer used as loop variable + * @p_start: ptr to unsigned long for start address of the range + * @p_end: ptr to unsigned long for end address of the range + * + * Walks over detected online memory ranges below usable limit. + */ +#define for_each_physmem_usable_range(i, p_start, p_end) \ + for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++) + +/* Walks over all detected online memory ranges disregarding usable limit. 
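+ * Editor's usage sketch (hypothetical caller; handle_range() is made up):
+ *	unsigned long start, end;
+ *	int i;
+ *
+ *	for_each_physmem_online_range(i, &start, &end)
+ *		handle_range(start, end);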
*/ +#define for_each_physmem_online_range(i, p_start, p_end) \ + for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++) + +static inline const char *get_physmem_info_source(void) +{ + switch (physmem_info.info_source) { + case MEM_DETECT_SCLP_STOR_INFO: + return "sclp storage info"; + case MEM_DETECT_DIAG260: + return "diag260"; + case MEM_DETECT_SCLP_READ_INFO: + return "sclp read info"; + case MEM_DETECT_BIN_SEARCH: + return "binary search"; + } + return "none"; +} + +#define RR_TYPE_NAME(t) case RR_ ## t: return #t +static inline const char *get_rr_type_name(enum reserved_range_type t) +{ + switch (t) { + RR_TYPE_NAME(DECOMPRESSOR); + RR_TYPE_NAME(INITRD); + RR_TYPE_NAME(VMLINUX); + RR_TYPE_NAME(AMODE31); + RR_TYPE_NAME(IPLREPORT); + RR_TYPE_NAME(CERT_COMP_LIST); + RR_TYPE_NAME(MEM_DETECT_EXTENDED); + RR_TYPE_NAME(VMEM); + default: + return "UNKNOWN"; + } +} + +#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \ + for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \ + range && range->end; range = range->chain ? __va(range->chain) : NULL, \ + *p_start = range ? range->start : 0, *p_end = range ? range->end : 0) + +static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t, + struct reserved_range *range) +{ + if (!range) { + range = &physmem_info.reserved[*t]; + if (range->end) + return range; + } + if (range->chain) + return __va(range->chain); + while (++*t < RR_MAX) { + range = &physmem_info.reserved[*t]; + if (range->end) + return range; + } + return NULL; +} + +#define for_each_physmem_reserved_range(t, range, p_start, p_end) \ + for (t = 0, range = __physmem_reserved_next(&t, NULL), \ + *p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \ + range; range = __physmem_reserved_next(&t, range), \ + *p_start = range ? range->start : 0, *p_end = range ? range->end : 0) + +static inline unsigned long get_physmem_reserved(enum reserved_range_type type, + unsigned long *addr, unsigned long *size) +{ + *addr = physmem_info.reserved[type].start; + *size = physmem_info.reserved[type].end - physmem_info.reserved[type].start; + return *size; +} + +#endif diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h new file mode 100644 index 000000000..47d80a745 --- /dev/null +++ b/arch/s390/include/asm/pkey.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernelspace interface to the pkey device driver + * + * Copyright IBM Corp. 2016, 2023 + * + * Author: Harald Freudenberger + * + */ + +#ifndef _KAPI_PKEY_H +#define _KAPI_PKEY_H + +#include +#include +#include + +/* + * In-kernel API: Transform a key blob (of any type) into a protected key. + * @param key pointer to a buffer containing the key blob + * @param keylen size of the key blob in bytes + * @param protkey pointer to buffer receiving the protected key + * @return 0 on success, negative errno value on failure + */ +int pkey_keyblob2pkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + +#endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h new file mode 100644 index 000000000..5739276b4 --- /dev/null +++ b/arch/s390/include/asm/pnet.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IBM System z PNET ID Support + * + * Copyright IBM Corp. 
2018 + */ + +#ifndef _ASM_S390_PNET_H +#define _ASM_S390_PNET_H + +#include +#include + +int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid); +#endif /* _ASM_S390_PNET_H */ diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h new file mode 100644 index 000000000..bf15da0fe --- /dev/null +++ b/arch/s390/include/asm/preempt.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include +#include +#include + +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES + +/* We use the MSB mostly because its available */ +#define PREEMPT_NEED_RESCHED 0x80000000 +#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) + +static inline int preempt_count(void) +{ + return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED; +} + +static inline void preempt_count_set(int pc) +{ + int old, new; + + do { + old = READ_ONCE(S390_lowcore.preempt_count); + new = (old & PREEMPT_NEED_RESCHED) | + (pc & ~PREEMPT_NEED_RESCHED); + } while (__atomic_cmpxchg(&S390_lowcore.preempt_count, + old, new) != old); +} + +static inline void set_preempt_need_resched(void) +{ + __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); +} + +static inline void clear_preempt_need_resched(void) +{ + __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); +} + +static inline bool test_preempt_need_resched(void) +{ + return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED); +} + +static inline void __preempt_count_add(int val) +{ + /* + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES + * enabled, gcc 12 fails to handle __builtin_constant_p(). + */ + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { + __atomic_add_const(val, &S390_lowcore.preempt_count); + return; + } + } + __atomic_add(val, &S390_lowcore.preempt_count); +} + +static inline void __preempt_count_sub(int val) +{ + __preempt_count_add(-val); +} + +static inline bool __preempt_count_dec_and_test(void) +{ + return __atomic_add(-1, &S390_lowcore.preempt_count) == 1; +} + +static inline bool should_resched(int preempt_offset) +{ + return unlikely(READ_ONCE(S390_lowcore.preempt_count) == + preempt_offset); +} + +#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define PREEMPT_ENABLED (0) + +static inline int preempt_count(void) +{ + return READ_ONCE(S390_lowcore.preempt_count); +} + +static inline void preempt_count_set(int pc) +{ + S390_lowcore.preempt_count = pc; +} + +static inline void set_preempt_need_resched(void) +{ +} + +static inline void clear_preempt_need_resched(void) +{ +} + +static inline bool test_preempt_need_resched(void) +{ + return false; +} + +static inline void __preempt_count_add(int val) +{ + S390_lowcore.preempt_count += val; +} + +static inline void __preempt_count_sub(int val) +{ + S390_lowcore.preempt_count -= val; +} + +static inline bool __preempt_count_dec_and_test(void) +{ + return !--S390_lowcore.preempt_count && tif_need_resched(); +} + +static inline bool should_resched(int preempt_offset) +{ + return unlikely(preempt_count() == preempt_offset && + tif_need_resched()); +} + +#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define init_task_preempt_count(p) do { } while (0) +/* Deferred to CPU bringup time */ +#define init_idle_preempt_count(p, cpu) do { } while (0) + +#ifdef CONFIG_PREEMPTION +extern void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() +extern void preempt_schedule_notrace(void); +#define 
__preempt_schedule_notrace() preempt_schedule_notrace() +#endif /* CONFIG_PREEMPTION */ + +#endif /* __ASM_PREEMPT_H */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h new file mode 100644 index 000000000..dc17896a0 --- /dev/null +++ b/arch/s390/include/asm/processor.h @@ -0,0 +1,380 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Hartmut Penner (hp@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/processor.h" + * Copyright (C) 1994, Linus Torvalds + */ + +#ifndef __ASM_S390_PROCESSOR_H +#define __ASM_S390_PROCESSOR_H + +#include + +#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ +#define CIF_FPU 3 /* restore FPU registers */ +#define CIF_ENABLED_WAIT 5 /* in enabled wait state */ +#define CIF_MCCK_GUEST 6 /* machine check happening in guest */ +#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */ + +#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY) +#define _CIF_FPU BIT(CIF_FPU) +#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT) +#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST) +#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU) + +#define RESTART_FLAG_CTLREGS _AC(1 << 0, U) + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef long (*sys_call_ptr_t)(struct pt_regs *regs); + +static __always_inline void set_cpu_flag(int flag) +{ + S390_lowcore.cpu_flags |= (1UL << flag); +} + +static __always_inline void clear_cpu_flag(int flag) +{ + S390_lowcore.cpu_flags &= ~(1UL << flag); +} + +static __always_inline bool test_cpu_flag(int flag) +{ + return S390_lowcore.cpu_flags & (1UL << flag); +} + +static __always_inline bool test_and_set_cpu_flag(int flag) +{ + if (test_cpu_flag(flag)) + return true; + set_cpu_flag(flag); + return false; +} + +static __always_inline bool test_and_clear_cpu_flag(int flag) +{ + if (!test_cpu_flag(flag)) + return false; + clear_cpu_flag(flag); + return true; +} + +/* + * Test CIF flag of another CPU. The caller needs to ensure that + * CPU hotplug can not happen, e.g. by disabling preemption. + */ +static __always_inline bool test_cpu_flag_of(int flag, int cpu) +{ + struct lowcore *lc = lowcore_ptr[cpu]; + + return lc->cpu_flags & (1UL << flag); +} + +#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) + +static inline void get_cpu_id(struct cpuid *ptr) +{ + asm volatile("stidp %0" : "=Q" (*ptr)); +} + +void s390_adjust_jiffies(void); +void s390_update_cpu_mhz(void); +void cpu_detect_mhz_feature(void); + +extern const struct seq_operations cpuinfo_op; +extern void execve_tail(void); +unsigned long vdso_size(void); + +/* + * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. + */ + +#define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \ + _REGION3_SIZE : TASK_SIZE_MAX) +#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ + (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) +#define TASK_SIZE_MAX (-PAGE_SIZE) + +#define VDSO_BASE (STACK_TOP + PAGE_SIZE) +#define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? 
_REGION3_SIZE : _REGION2_SIZE) +#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) +#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) + +#define HAVE_ARCH_PICK_MMAP_LAYOUT + +#define __stackleak_poison __stackleak_poison +static __always_inline void __stackleak_poison(unsigned long erase_low, + unsigned long erase_high, + unsigned long poison) +{ + unsigned long tmp, count; + + count = erase_high - erase_low; + if (!count) + return; + asm volatile( + " cghi %[count],8\n" + " je 2f\n" + " aghi %[count],-(8+1)\n" + " srlg %[tmp],%[count],8\n" + " ltgr %[tmp],%[tmp]\n" + " jz 1f\n" + "0: stg %[poison],0(%[addr])\n" + " mvc 8(256-8,%[addr]),0(%[addr])\n" + " la %[addr],256(%[addr])\n" + " brctg %[tmp],0b\n" + "1: stg %[poison],0(%[addr])\n" + " larl %[tmp],3f\n" + " ex %[count],0(%[tmp])\n" + " j 4f\n" + "2: stg %[poison],0(%[addr])\n" + " j 4f\n" + "3: mvc 8(1,%[addr]),0(%[addr])\n" + "4:\n" + : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp) + : [poison] "d" (poison) + : "memory", "cc" + ); +} + +/* + * Thread structure + */ +struct thread_struct { + unsigned int acrs[NUM_ACRS]; + unsigned long ksp; /* kernel stack pointer */ + unsigned long user_timer; /* task cputime in user space */ + unsigned long guest_timer; /* task cputime in kvm guest */ + unsigned long system_timer; /* task cputime in kernel space */ + unsigned long hardirq_timer; /* task cputime in hardirq context */ + unsigned long softirq_timer; /* task cputime in softirq context */ + const sys_call_ptr_t *sys_call_table; /* system call table address */ + unsigned long gmap_addr; /* address of last gmap fault. */ + unsigned int gmap_write_flag; /* gmap fault write indication */ + unsigned int gmap_int_code; /* int code of last gmap fault */ + unsigned int gmap_pfault; /* signal of a pending guest pfault */ + + /* Per-thread information related to debugging */ + struct per_regs per_user; /* User specified PER registers */ + struct per_event per_event; /* Cause of the last PER trap */ + unsigned long per_flags; /* Flags to control debug behavior */ + unsigned int system_call; /* system call number in signal */ + unsigned long last_break; /* last breaking-event-address. */ + /* pfault_wait is used to block the process on a pfault event */ + unsigned long pfault_wait; + struct list_head list; + /* cpu runtime instrumentation */ + struct runtime_instr_cb *ri_cb; + struct gs_cb *gs_cb; /* Current guarded storage cb */ + struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ + struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */ + /* + * Warning: 'fpu' is dynamically-sized. It *MUST* be at + * the end. + */ + struct fpu fpu; /* FP and VX register save area */ +}; + +/* Flag to disable transactions. */ +#define PER_FLAG_NO_TE 1UL +/* Flag to enable random transaction aborts. */ +#define PER_FLAG_TE_ABORT_RAND 2UL +/* Flag to specify random transaction abort mode: + * - abort each transaction at a random instruction before TEND if set. + * - abort random transactions at a random instruction if cleared. + */ +#define PER_FLAG_TE_ABORT_RAND_TEND 4UL + +typedef struct thread_struct thread_struct; + +#define ARCH_MIN_TASKALIGN 8 + +#define INIT_THREAD { \ + .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ + .fpu.regs = (void *) init_task.thread.fpu.fprs, \ + .last_break = 1, \ +} + +/* + * Do necessary setup to start up a new thread. 
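+ *
+ * Editor's note: start_thread() below selects the 64-bit addressing mode
+ * via PSW_MASK_EA | PSW_MASK_BA, while start_thread31() sets only
+ * PSW_MASK_BA for 31-bit compat tasks; both load the new psw address and
+ * stack pointer (gpr 15) and then call execve_tail().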
+ */ +#define start_thread(regs, new_psw, new_stackp) do { \ + regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \ + regs->psw.addr = new_psw; \ + regs->gprs[15] = new_stackp; \ + execve_tail(); \ +} while (0) + +#define start_thread31(regs, new_psw, new_stackp) do { \ + regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ + regs->psw.addr = new_psw; \ + regs->gprs[15] = new_stackp; \ + execve_tail(); \ +} while (0) + +/* Forward declaration, a strange C thing */ +struct task_struct; +struct mm_struct; +struct seq_file; +struct pt_regs; + +void show_registers(struct pt_regs *regs); +void show_cacheinfo(struct seq_file *m); + +/* Free guarded storage control block */ +void guarded_storage_release(struct task_struct *tsk); +void gs_load_bc_cb(struct pt_regs *regs); + +unsigned long __get_wchan(struct task_struct *p); +#define task_pt_regs(tsk) ((struct pt_regs *) \ + (task_stack_page(tsk) + THREAD_SIZE) - 1) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15]) + +/* Has task runtime instrumentation enabled ? */ +#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb) + +/* avoid using global register due to gcc bug in versions < 8.4 */ +#define current_stack_pointer (__current_stack_pointer()) + +static __always_inline unsigned long __current_stack_pointer(void) +{ + unsigned long sp; + + asm volatile("lgr %0,15" : "=d" (sp)); + return sp; +} + +static __always_inline bool on_thread_stack(void) +{ + unsigned long ksp = S390_lowcore.kernel_stack; + + return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + +static __always_inline unsigned short stap(void) +{ + unsigned short cpu_address; + + asm volatile("stap %0" : "=Q" (cpu_address)); + return cpu_address; +} + +#define cpu_relax() barrier() + +#define ECAG_CACHE_ATTRIBUTE 0 +#define ECAG_CPU_ATTRIBUTE 1 + +static inline unsigned long __ecag(unsigned int asi, unsigned char parm) +{ + unsigned long val; + + asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm)); + return val; +} + +static inline void psw_set_key(unsigned int key) +{ + asm volatile("spka 0(%0)" : : "d" (key)); +} + +/* + * Set PSW to specified value. + */ +static inline void __load_psw(psw_t psw) +{ + asm volatile("lpswe %0" : : "Q" (psw) : "cc"); +} + +/* + * Set PSW mask to specified value, while leaving the + * PSW addr pointing to the next instruction. + */ +static __always_inline void __load_psw_mask(unsigned long mask) +{ + unsigned long addr; + psw_t psw; + + psw.mask = mask; + + asm volatile( + " larl %0,1f\n" + " stg %0,%1\n" + " lpswe %2\n" + "1:" + : "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc"); +} + +/* + * Extract current PSW mask + */ +static inline unsigned long __extract_psw(void) +{ + unsigned int reg1, reg2; + + asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2)); + return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); +} + +static inline void local_mcck_enable(void) +{ + __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK); +} + +static inline void local_mcck_disable(void) +{ + __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK); +} + +/* + * Rewind PSW instruction address by specified number of bytes. + */ +static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) +{ + unsigned long mask; + + mask = (psw.mask & PSW_MASK_EA) ? -1UL : + (psw.mask & PSW_MASK_BA) ? 
(1UL << 31) - 1 : + (1UL << 24) - 1; + return (psw.addr - ilc) & mask; +} + +/* + * Function to drop a processor into disabled wait state + */ +static __always_inline void __noreturn disabled_wait(void) +{ + psw_t psw; + + psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA; + psw.addr = _THIS_IP_; + __load_psw(psw); + while (1); +} + +#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL + +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) +{ + return arch_irqs_disabled_flags(regs->psw.mask); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/ptdump.h b/arch/s390/include/asm/ptdump.h new file mode 100644 index 000000000..f960b2896 --- /dev/null +++ b/arch/s390/include/asm/ptdump.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_PTDUMP_H +#define _ASM_S390_PTDUMP_H + +void ptdump_check_wx(void); + +static inline void debug_checkwx(void) +{ + if (IS_ENABLED(CONFIG_DEBUG_WX)) + ptdump_check_wx(); +} + +#endif /* _ASM_S390_PTDUMP_H */ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h new file mode 100644 index 000000000..d28bf8fb2 --- /dev/null +++ b/arch/s390/include/asm/ptrace.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + */ +#ifndef _S390_PTRACE_H +#define _S390_PTRACE_H + +#include +#include +#include + +#define PIF_SYSCALL 0 /* inside a system call */ +#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */ +#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ +#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ +#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ + +#define _PIF_SYSCALL BIT(PIF_SYSCALL) +#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART) +#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) +#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) +#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) + +#define PSW32_MASK_PER _AC(0x40000000, UL) +#define PSW32_MASK_DAT _AC(0x04000000, UL) +#define PSW32_MASK_IO _AC(0x02000000, UL) +#define PSW32_MASK_EXT _AC(0x01000000, UL) +#define PSW32_MASK_KEY _AC(0x00F00000, UL) +#define PSW32_MASK_BASE _AC(0x00080000, UL) /* Always one */ +#define PSW32_MASK_MCHECK _AC(0x00040000, UL) +#define PSW32_MASK_WAIT _AC(0x00020000, UL) +#define PSW32_MASK_PSTATE _AC(0x00010000, UL) +#define PSW32_MASK_ASC _AC(0x0000C000, UL) +#define PSW32_MASK_CC _AC(0x00003000, UL) +#define PSW32_MASK_PM _AC(0x00000f00, UL) +#define PSW32_MASK_RI _AC(0x00000080, UL) + +#define PSW32_ADDR_AMODE _AC(0x80000000, UL) +#define PSW32_ADDR_INSN _AC(0x7FFFFFFF, UL) + +#define PSW32_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 20) + +#define PSW32_ASC_PRIMARY _AC(0x00000000, UL) +#define PSW32_ASC_ACCREG _AC(0x00004000, UL) +#define PSW32_ASC_SECONDARY _AC(0x00008000, UL) +#define PSW32_ASC_HOME _AC(0x0000C000, UL) + +#define PSW_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 52) + +#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \ + PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_DAT) +#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ + PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ + PSW_MASK_PSTATE | PSW_ASC_PRIMARY) + +#ifndef __ASSEMBLY__ + +struct psw_bits { + unsigned long : 1; + unsigned long per : 1; /* PER-Mask */ + unsigned long : 3; + unsigned long dat : 1; /* DAT Mode */ + 
unsigned long io : 1; /* Input/Output Mask */ + unsigned long ext : 1; /* External Mask */ + unsigned long key : 4; /* PSW Key */ + unsigned long : 1; + unsigned long mcheck : 1; /* Machine-Check Mask */ + unsigned long wait : 1; /* Wait State */ + unsigned long pstate : 1; /* Problem State */ + unsigned long as : 2; /* Address Space Control */ + unsigned long cc : 2; /* Condition Code */ + unsigned long pm : 4; /* Program Mask */ + unsigned long ri : 1; /* Runtime Instrumentation */ + unsigned long : 6; + unsigned long eaba : 2; /* Addressing Mode */ + unsigned long : 31; + unsigned long ia : 64; /* Instruction Address */ +}; + +enum { + PSW_BITS_AMODE_24BIT = 0, + PSW_BITS_AMODE_31BIT = 1, + PSW_BITS_AMODE_64BIT = 3 +}; + +enum { + PSW_BITS_AS_PRIMARY = 0, + PSW_BITS_AS_ACCREG = 1, + PSW_BITS_AS_SECONDARY = 2, + PSW_BITS_AS_HOME = 3 +}; + +#define psw_bits(__psw) (*({ \ + typecheck(psw_t, __psw); \ + &(*(struct psw_bits *)(&(__psw))); \ +})) + +typedef struct { + unsigned int mask; + unsigned int addr; +} psw_t32 __aligned(8); + +#define PGM_INT_CODE_MASK 0x7f +#define PGM_INT_CODE_PER 0x80 + +/* + * The pt_regs struct defines the way the registers are stored on + * the stack during a system call. + */ +struct pt_regs { + union { + user_pt_regs user_regs; + struct { + unsigned long args[1]; + psw_t psw; + unsigned long gprs[NUM_GPRS]; + }; + }; + unsigned long orig_gpr2; + union { + struct { + unsigned int int_code; + unsigned int int_parm; + unsigned long int_parm_long; + }; + struct tpi_info tpi_info; + }; + unsigned long flags; + unsigned long cr1; + unsigned long last_break; +}; + +/* + * Program event recording (PER) register set. + */ +struct per_regs { + unsigned long control; /* PER control bits */ + unsigned long start; /* PER starting address */ + unsigned long end; /* PER ending address */ +}; + +/* + * PER event contains information about the cause of the last PER exception. + */ +struct per_event { + unsigned short cause; /* PER code, ATMID and AI */ + unsigned long address; /* PER address */ + unsigned char paid; /* PER access identification */ +}; + +/* + * Simplified per_info structure used to decode the ptrace user space ABI. 
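+ *
+ * Editor's note: cr9/cr10/cr11 mirror control registers 9-11 (PER
+ * control, starting and ending address) as the hardware sees them,
+ * while the remaining fields hold the decoded per-event view exposed
+ * through the ptrace user area.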
+ */ +struct per_struct_kernel { + unsigned long cr9; /* PER control bits */ + unsigned long cr10; /* PER starting address */ + unsigned long cr11; /* PER ending address */ + unsigned long bits; /* Obsolete software bits */ + unsigned long starting_addr; /* User specified start address */ + unsigned long ending_addr; /* User specified end address */ + unsigned short perc_atmid; /* PER trap ATMID */ + unsigned long address; /* PER trap instruction address */ + unsigned char access_id; /* PER trap access identification */ +}; + +#define PER_EVENT_MASK 0xEB000000UL + +#define PER_EVENT_BRANCH 0x80000000UL +#define PER_EVENT_IFETCH 0x40000000UL +#define PER_EVENT_STORE 0x20000000UL +#define PER_EVENT_STORE_REAL 0x08000000UL +#define PER_EVENT_TRANSACTION_END 0x02000000UL +#define PER_EVENT_NULLIFICATION 0x01000000UL + +#define PER_CONTROL_MASK 0x00e00000UL + +#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL +#define PER_CONTROL_SUSPENSION 0x00400000UL +#define PER_CONTROL_ALTERATION 0x00200000UL + +static inline void set_pt_regs_flag(struct pt_regs *regs, int flag) +{ + regs->flags |= (1UL << flag); +} + +static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag) +{ + regs->flags &= ~(1UL << flag); +} + +static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) +{ + return !!(regs->flags & (1UL << flag)); +} + +static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag) +{ + int ret = test_pt_regs_flag(regs, flag); + + clear_pt_regs_flag(regs, flag); + return ret; +} + +/* + * These are defined as per linux/ptrace.h, which see. + */ +#define arch_has_single_step() (1) +#define arch_has_block_step() (1) + +#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) +#define instruction_pointer(regs) ((regs)->psw.addr) +#define user_stack_pointer(regs)((regs)->gprs[15]) +#define profile_pc(regs) instruction_pointer(regs) + +static inline long regs_return_value(struct pt_regs *regs) +{ + return regs->gprs[2]; +} + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->psw.addr = val; +} + +int regs_query_register_offset(const char *name); +const char *regs_query_register_name(unsigned int offset); +unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); + +/** + * regs_get_kernel_argument() - get Nth function argument in kernel + * @regs: pt_regs of that context + * @n: function argument number (start from 0) + * + * regs_get_kernel_argument() returns @n th argument of the function call. + */ +static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, + unsigned int n) +{ + unsigned int argoffset = STACK_FRAME_OVERHEAD / sizeof(long); + +#define NR_REG_ARGUMENTS 5 + if (n < NR_REG_ARGUMENTS) + return regs_get_register(regs, 2 + n); + n -= NR_REG_ARGUMENTS; + return regs_get_kernel_stack_nth(regs, argoffset + n); +} + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->gprs[15]; +} + +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +{ + regs->gprs[2] = rc; +} + +#endif /* __ASSEMBLY__ */ +#endif /* _S390_PTRACE_H */ diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h new file mode 100644 index 000000000..e297bcfc4 --- /dev/null +++ b/arch/s390/include/asm/purgatory.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2018 + * + * Author(s): Philipp Rudo + */ + +#ifndef _S390_PURGATORY_H_ +#define _S390_PURGATORY_H_ +#ifndef __ASSEMBLY__ + +#include + +int verify_sha256_digest(void); + +#endif /* __ASSEMBLY__ */ +#endif /* _S390_PURGATORY_H_ */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h new file mode 100644 index 000000000..2f983e0b9 --- /dev/null +++ b/arch/s390/include/asm/qdio.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2000, 2008 + * Author(s): Utz Bacher + * Jan Glauber + * + */ +#ifndef __QDIO_H__ +#define __QDIO_H__ + +#include +#include +#include + +/* only use 4 queues to save some cachelines */ +#define QDIO_MAX_QUEUES_PER_IRQ 4 +#define QDIO_MAX_BUFFERS_PER_Q 128 +#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) +#define QDIO_BUFNR(num) ((num) & QDIO_MAX_BUFFERS_MASK) +#define QDIO_MAX_ELEMENTS_PER_BUFFER 16 + +#define QDIO_QETH_QFMT 0 +#define QDIO_ZFCP_QFMT 1 +#define QDIO_IQDIO_QFMT 2 + +/** + * struct qdesfmt0 - queue descriptor, format 0 + * @sliba: absolute address of storage list information block + * @sla: absolute address of storage list + * @slsba: absolute address of storage list state block + * @akey: access key for SLIB + * @bkey: access key for SL + * @ckey: access key for SBALs + * @dkey: access key for SLSB + */ +struct qdesfmt0 { + u64 sliba; + u64 sla; + u64 slsba; + u32 : 32; + u32 akey : 4; + u32 bkey : 4; + u32 ckey : 4; + u32 dkey : 4; + u32 : 16; +} __attribute__ ((packed)); + +#define QDR_AC_MULTI_BUFFER_ENABLE 0x01 + +/** + * struct qdr - queue description record (QDR) + * @qfmt: queue format + * @ac: adapter characteristics + * @iqdcnt: input queue descriptor count + * @oqdcnt: output queue descriptor count + * @iqdsz: input queue descriptor size + * @oqdsz: output queue descriptor size + * @qiba: absolute address of queue information block + * @qkey: queue information block key + * @qdf0: queue descriptions + */ +struct qdr { + u32 qfmt : 8; + u32 : 16; + u32 ac : 8; + u32 : 8; + u32 iqdcnt : 8; + u32 : 8; + u32 oqdcnt : 8; + u32 : 8; + u32 iqdsz : 8; + u32 : 8; + u32 oqdsz : 8; + /* private: */ + u32 res[9]; + /* public: */ + u64 qiba; + u32 : 32; + u32 qkey : 4; + u32 : 28; + struct qdesfmt0 qdf0[126]; +} __packed __aligned(PAGE_SIZE); + +#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 +#define QIB_RFLAGS_ENABLE_QEBSM 0x80 +#define QIB_RFLAGS_ENABLE_DATA_DIV 0x02 + +/** + * struct qib - queue information block (QIB) + * @qfmt: queue format + * @pfmt: implementation dependent parameter format + * @rflags: QEBSM + * @ac: adapter characteristics + * @isliba: logical address of first input SLIB + * @osliba: logical address of first output SLIB + * @ebcnam: adapter identifier in EBCDIC + * @parm: implementation dependent parameters + */ +struct qib { + u32 qfmt : 8; + u32 pfmt : 8; + u32 rflags : 8; + u32 ac : 8; + u32 : 32; + u64 isliba; + u64 osliba; + u32 : 32; + u32 : 32; + u8 ebcnam[8]; + /* private: */ + u8 res[88]; + /* public: */ + u8 parm[128]; +} __attribute__ ((packed, aligned(256))); + +/** + * struct slibe - storage list information block element (SLIBE) + * @parms: implementation dependent parameters + */ +struct slibe { + u64 parms; +}; + +/** + * struct qaob - queue asynchronous operation block + * @res0: reserved parameters + * @res1: reserved parameter + * @res2: reserved parameter + * @res3: reserved parameter + * @aorc: asynchronous operation return code + * @flags: internal flags + * @cbtbs: control block type + * @sb_count: number of storage blocks + * @sba: storage 
block element addresses + * @dcount: size of storage block elements + * @user0: user definable value + * @res4: reserved parameter + * @user1: user definable value + */ +struct qaob { + u64 res0[6]; + u8 res1; + u8 res2; + u8 res3; + u8 aorc; + u8 flags; + u16 cbtbs; + u8 sb_count; + u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER]; + u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER]; + u64 user0; + u64 res4[2]; + u8 user1[16]; +} __attribute__ ((packed, aligned(256))); + +/** + * struct slib - storage list information block (SLIB) + * @nsliba: next SLIB address (if any) + * @sla: SL address + * @slsba: SLSB address + * @slibe: SLIB elements + */ +struct slib { + u64 nsliba; + u64 sla; + u64 slsba; + /* private: */ + u8 res[1000]; + /* public: */ + struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; +} __attribute__ ((packed, aligned(2048))); + +#define SBAL_EFLAGS_LAST_ENTRY 0x40 +#define SBAL_EFLAGS_CONTIGUOUS 0x20 +#define SBAL_EFLAGS_FIRST_FRAG 0x04 +#define SBAL_EFLAGS_MIDDLE_FRAG 0x08 +#define SBAL_EFLAGS_LAST_FRAG 0x0c +#define SBAL_EFLAGS_MASK 0x6f + +#define SBAL_SFLAGS0_PCI_REQ 0x40 +#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20 + +/* Awesome OpenFCP extensions */ +#define SBAL_SFLAGS0_TYPE_STATUS 0x00 +#define SBAL_SFLAGS0_TYPE_WRITE 0x08 +#define SBAL_SFLAGS0_TYPE_READ 0x10 +#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18 +#define SBAL_SFLAGS0_MORE_SBALS 0x04 +#define SBAL_SFLAGS0_COMMAND 0x02 +#define SBAL_SFLAGS0_LAST_SBAL 0x00 +#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND +#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS +#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND) + +/** + * struct qdio_buffer_element - SBAL entry + * @eflags: SBAL entry flags + * @scount: SBAL count + * @sflags: whole SBAL flags + * @length: length + * @addr: absolute data address +*/ +struct qdio_buffer_element { + u8 eflags; + /* private: */ + u8 res1; + /* public: */ + u8 scount; + u8 sflags; + u32 length; + u64 addr; +} __attribute__ ((packed, aligned(16))); + +/** + * struct qdio_buffer - storage block address list (SBAL) + * @element: SBAL entries + */ +struct qdio_buffer { + struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER]; +} __attribute__ ((packed, aligned(256))); + +/** + * struct sl_element - storage list entry + * @sbal: absolute SBAL address + */ +struct sl_element { + u64 sbal; +} __attribute__ ((packed)); + +/** + * struct sl - storage list (SL) + * @element: SL entries + */ +struct sl { + struct sl_element element[QDIO_MAX_BUFFERS_PER_Q]; +} __attribute__ ((packed, aligned(1024))); + +/** + * struct slsb - storage list state block (SLSB) + * @val: state per buffer + */ +struct slsb { + u8 val[QDIO_MAX_BUFFERS_PER_Q]; +} __attribute__ ((packed, aligned(256))); + +/* qdio adapter-characteristics-1 flag */ +#define CHSC_AC1_INITIATE_INPUTQ 0x80 +#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ +#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ +#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ +#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ +#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ +#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ +#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ + +#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080 +#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040 +#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 +#define CHSC_AC2_SNIFFER_AVAILABLE 0x0008 +#define CHSC_AC2_DATA_DIV_ENABLED 0x0002 + +#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 
0x8000 + +struct qdio_ssqd_desc { + u8 flags; + u8:8; + u16 sch; + u8 qfmt; + u8 parm; + u8 qdioac1; + u8 sch_class; + u8 pcnt; + u8 icnt; + u8:8; + u8 ocnt; + u8:8; + u8 mbccnt; + u16 qdioac2; + u64 sch_token; + u8 mro; + u8 mri; + u16 qdioac3; + u16:16; + u8:8; + u8 mmwc; +} __attribute__ ((packed)); + +/* params are: ccw_device, qdio_error, queue_number, + first element processed, number of elements processed, int_parm */ +typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, + int, int, unsigned long); + +/* qdio errors reported through the queue handlers: */ +#define QDIO_ERROR_ACTIVATE 0x0001 +#define QDIO_ERROR_GET_BUF_STATE 0x0002 +#define QDIO_ERROR_SET_BUF_STATE 0x0004 + +/* extra info for completed SBALs: */ +#define QDIO_ERROR_SLSB_STATE 0x0100 +#define QDIO_ERROR_SLSB_PENDING 0x0200 + +/* for qdio_cleanup */ +#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01 +#define QDIO_FLAG_CLEANUP_USING_HALT 0x02 + +/** + * struct qdio_initialize - qdio initialization data + * @q_format: queue format + * @qdr_ac: feature flags to set + * @qib_param_field_format: format for qib_parm_field + * @qib_param_field: pointer to 128 bytes or NULL, if no param field + * @qib_rflags: rflags to set + * @no_input_qs: number of input queues + * @no_output_qs: number of output queues + * @input_handler: handler to be called for input queues, and device-wide errors + * @output_handler: handler to be called for output queues + * @irq_poll: Data IRQ polling handler + * @scan_threshold: # of in-use buffers that triggers scan on output queue + * @int_parm: interruption parameter + * @input_sbal_addr_array: per-queue array, each element points to 128 SBALs + * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs + */ +struct qdio_initialize { + unsigned char q_format; + unsigned char qdr_ac; + unsigned int qib_param_field_format; + unsigned char *qib_param_field; + unsigned char qib_rflags; + unsigned int no_input_qs; + unsigned int no_output_qs; + qdio_handler_t *input_handler; + qdio_handler_t *output_handler; + void (*irq_poll)(struct ccw_device *cdev, unsigned long data); + unsigned long int_parm; + struct qdio_buffer ***input_sbal_addr_array; + struct qdio_buffer ***output_sbal_addr_array; +}; + +int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count); +void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count); +void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count); + +extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, + unsigned int no_output_qs); +extern int qdio_establish(struct ccw_device *cdev, + struct qdio_initialize *init_data); +extern int qdio_activate(struct ccw_device *); +extern int qdio_start_irq(struct ccw_device *cdev); +extern int qdio_stop_irq(struct ccw_device *cdev); +extern int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error); +extern int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error); +extern int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, + unsigned int q_nr, unsigned int bufnr, + unsigned int count); +extern int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, + unsigned int q_nr, unsigned int bufnr, + unsigned int count, struct qaob *aob); +extern int qdio_shutdown(struct ccw_device *, int); +extern int qdio_free(struct ccw_device *); +extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); + +#endif /* __QDIO_H__ */ diff --git 
a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h new file mode 100644 index 000000000..0e1605538 --- /dev/null +++ b/arch/s390/include/asm/runtime_instr.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _RUNTIME_INSTR_H +#define _RUNTIME_INSTR_H + +#include + +extern struct runtime_instr_cb runtime_instr_empty_cb; + +static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) +{ + if (cb_prev) + store_runtime_instr_cb(cb_prev); +} + +static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, + struct runtime_instr_cb *cb_prev) +{ + if (cb_next) + load_runtime_instr_cb(cb_next); + else if (cb_prev) + load_runtime_instr_cb(&runtime_instr_empty_cb); +} + +struct task_struct; + +void runtime_instr_release(struct task_struct *tsk); + +#endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/include/asm/rwonce.h b/arch/s390/include/asm/rwonce.h new file mode 100644 index 000000000..91fc24520 --- /dev/null +++ b/arch/s390/include/asm/rwonce.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_S390_RWONCE_H +#define __ASM_S390_RWONCE_H + +#include + +/* + * Use READ_ONCE_ALIGNED_128() for 128-bit block concurrent (atomic) read + * accesses. Note that x must be 128-bit aligned, otherwise a specification + * exception is generated. + */ +#define READ_ONCE_ALIGNED_128(x) \ +({ \ + union { \ + typeof(x) __x; \ + __uint128_t val; \ + } __u; \ + \ + BUILD_BUG_ON(sizeof(x) != 16); \ + asm volatile( \ + " lpq %[val],%[_x]\n" \ + : [val] "=d" (__u.val) \ + : [_x] "QS" (x) \ + : "memory"); \ + __u.__x; \ +}) + +#include + +#endif /* __ASM_S390_RWONCE_H */ diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h new file mode 100644 index 000000000..3ac405a67 --- /dev/null +++ b/arch/s390/include/asm/schid.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_SCHID_H +#define ASM_SCHID_H + +#include +#include + +/* Helper function for sane state of pre-allocated subchannel_id. */ +static inline void +init_subchannel_id(struct subchannel_id *schid) +{ + memset(schid, 0, sizeof(struct subchannel_id)); + schid->one = 1; +} + +static inline int +schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) +{ + return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); +} + +#endif /* ASM_SCHID_H */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h new file mode 100644 index 000000000..5742d23bb --- /dev/null +++ b/arch/s390/include/asm/sclp.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007 + */ + +#ifndef _ASM_S390_SCLP_H +#define _ASM_S390_SCLP_H + +#include + +#define SCLP_CHP_INFO_MASK_SIZE 32 +#define EARLY_SCCB_SIZE PAGE_SIZE +#define SCLP_MAX_CORES 512 +/* 144 + 16 * SCLP_MAX_CORES + 2 * (SCLP_MAX_CORES - 1) */ +#define EXT_SCCB_READ_SCP (3 * PAGE_SIZE) +/* 24 + 16 * SCLP_MAX_CORES */ +#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE) + +#ifndef __ASSEMBLY__ +#include +#include +#include + +struct sclp_chp_info { + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; +}; + +#define LOADPARM_LEN 8 + +struct sclp_ipl_info { + int is_valid; + int has_dump; + char loadparm[LOADPARM_LEN]; +}; + +struct sclp_core_entry { + u8 core_id; + u8 reserved0; + u8 : 4; + u8 sief2 : 1; + u8 skey : 1; + u8 : 2; + u8 : 2; + u8 gpere : 1; + u8 siif : 1; + u8 sigpif : 1; + u8 : 3; + u8 reserved2[3]; + u8 : 2; + u8 ib : 1; + u8 cei : 1; + u8 : 4; + u8 reserved3[6]; + u8 type; + u8 reserved1; +} __attribute__((packed)); + +struct sclp_core_info { + unsigned int configured; + unsigned int standby; + unsigned int combined; + struct sclp_core_entry core[SCLP_MAX_CORES]; +}; + +struct sclp_info { + unsigned char has_linemode : 1; + unsigned char has_vt220 : 1; + unsigned char has_siif : 1; + unsigned char has_sigpif : 1; + unsigned char has_core_type : 1; + unsigned char has_sprp : 1; + unsigned char has_hvs : 1; + unsigned char has_esca : 1; + unsigned char has_sief2 : 1; + unsigned char has_64bscao : 1; + unsigned char has_gpere : 1; + unsigned char has_cmma : 1; + unsigned char has_gsls : 1; + unsigned char has_ib : 1; + unsigned char has_cei : 1; + unsigned char has_pfmfi : 1; + unsigned char has_ibs : 1; + unsigned char has_skey : 1; + unsigned char has_kss : 1; + unsigned char has_gisaf : 1; + unsigned char has_diag318 : 1; + unsigned char has_diag320 : 1; + unsigned char has_sipl : 1; + unsigned char has_sipl_eckd : 1; + unsigned char has_dirq : 1; + unsigned char has_iplcc : 1; + unsigned char has_zpci_lsi : 1; + unsigned char has_aisii : 1; + unsigned char has_aeni : 1; + unsigned char has_aisi : 1; + unsigned int ibc; + unsigned int mtid; + unsigned int mtid_cp; + unsigned int mtid_prev; + unsigned long rzm; + unsigned long rnmax; + unsigned long hamax; + unsigned int max_cores; + unsigned long hsa_size; + unsigned long facilities; + unsigned int hmfai; +}; +extern struct sclp_info sclp; + +struct zpci_report_error_header { + u8 version; /* Interface version byte */ + u8 action; /* Action qualifier byte + * 0: Adapter Reset Request + * 1: Deconfigure and repair action requested + * (OpenCrypto Problem Call Home) + * 2: Informational Report + * (OpenCrypto Successful Diagnostics Execution) + */ + u16 length; /* Length of Subsequent Data (up to 4K – SCLP header */ + u8 data[]; /* Subsequent Data passed verbatim to SCLP ET 24 */ +} __packed; + +extern char *sclp_early_sccb; + +void sclp_early_adjust_va(void); +void sclp_early_set_buffer(void *sccb); +int sclp_early_read_info(void); +int sclp_early_read_storage_info(void); +int sclp_early_get_core_info(struct sclp_core_info *info); +void sclp_early_get_ipl_info(struct sclp_ipl_info *info); +void sclp_early_detect(void); +void sclp_early_printk(const char *s); +void __sclp_early_printk(const char *s, unsigned int len); +void sclp_emergency_printk(const char *s); + +int sclp_early_get_memsize(unsigned long *mem); +int sclp_early_get_hsa_size(unsigned long *hsa_size); +int _sclp_get_core_info(struct sclp_core_info *info); +int sclp_core_configure(u8 core); +int 
sclp_core_deconfigure(u8 core); +int sclp_sdias_blk_count(void); +int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); +int sclp_chp_configure(struct chp_id chpid); +int sclp_chp_deconfigure(struct chp_id chpid); +int sclp_chp_read_info(struct sclp_chp_info *info); +int sclp_pci_configure(u32 fid); +int sclp_pci_deconfigure(u32 fid); +int sclp_ap_configure(u32 apid); +int sclp_ap_deconfigure(u32 apid); +int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid); +size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count); +void sclp_ocf_cpc_name_copy(char *dst); + +static inline int sclp_get_core_info(struct sclp_core_info *info, int early) +{ + if (early) + return sclp_early_get_core_info(info); + return _sclp_get_core_info(info); +} + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h new file mode 100644 index 000000000..322bdcd4b --- /dev/null +++ b/arch/s390/include/asm/scsw.h @@ -0,0 +1,1050 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Helper functions for scsw access. + * + * Copyright IBM Corp. 2008, 2012 + * Author(s): Peter Oberparleiter + */ + +#ifndef _ASM_S390_SCSW_H_ +#define _ASM_S390_SCSW_H_ + +#include +#include +#include + +/** + * struct cmd_scsw - command-mode subchannel status word + * @key: subchannel key + * @sctl: suspend control + * @eswf: esw format + * @cc: deferred condition code + * @fmt: format + * @pfch: prefetch + * @isic: initial-status interruption control + * @alcc: address-limit checking control + * @ssi: suppress-suspended interruption + * @zcc: zero condition code + * @ectl: extended control + * @pno: path not operational + * @res: reserved + * @fctl: function control + * @actl: activity control + * @stctl: status control + * @cpa: channel program address + * @dstat: device status + * @cstat: subchannel status + * @count: residual count + */ +struct cmd_scsw { + __u32 key : 4; + __u32 sctl : 1; + __u32 eswf : 1; + __u32 cc : 2; + __u32 fmt : 1; + __u32 pfch : 1; + __u32 isic : 1; + __u32 alcc : 1; + __u32 ssi : 1; + __u32 zcc : 1; + __u32 ectl : 1; + __u32 pno : 1; + __u32 res : 1; + __u32 fctl : 3; + __u32 actl : 7; + __u32 stctl : 5; + __u32 cpa; + __u32 dstat : 8; + __u32 cstat : 8; + __u32 count : 16; +} __attribute__ ((packed)); + +/** + * struct tm_scsw - transport-mode subchannel status word + * @key: subchannel key + * @eswf: esw format + * @cc: deferred condition code + * @fmt: format + * @x: IRB-format control + * @q: interrogate-complete + * @ectl: extended control + * @pno: path not operational + * @fctl: function control + * @actl: activity control + * @stctl: status control + * @tcw: TCW address + * @dstat: device status + * @cstat: subchannel status + * @fcxs: FCX status + * @schxs: subchannel-extended status + */ +struct tm_scsw { + u32 key:4; + u32 :1; + u32 eswf:1; + u32 cc:2; + u32 fmt:3; + u32 x:1; + u32 q:1; + u32 :1; + u32 ectl:1; + u32 pno:1; + u32 :1; + u32 fctl:3; + u32 actl:7; + u32 stctl:5; + u32 tcw; + u32 dstat:8; + u32 cstat:8; + u32 fcxs:8; + u32 ifob:1; + u32 sesq:7; +} __attribute__ ((packed)); + +/** + * struct eadm_scsw - subchannel status word for eadm subchannels + * @key: subchannel key + * @eswf: esw format + * @cc: deferred condition code + * @ectl: extended control + * @fctl: function control + * @actl: activity control + * @stctl: status control + * @aob: AOB address + * @dstat: device status + * @cstat: subchannel status + */ +struct eadm_scsw { + u32 key:4; + u32:1; + u32 eswf:1; 
+ u32 cc:2; + u32:6; + u32 ectl:1; + u32:2; + u32 fctl:3; + u32 actl:7; + u32 stctl:5; + u32 aob; + u32 dstat:8; + u32 cstat:8; + u32:16; +} __packed; + +/** + * union scsw - subchannel status word + * @cmd: command-mode SCSW + * @tm: transport-mode SCSW + * @eadm: eadm SCSW + */ +union scsw { + struct cmd_scsw cmd; + struct tm_scsw tm; + struct eadm_scsw eadm; +} __packed; + +#define SCSW_FCTL_CLEAR_FUNC 0x1 +#define SCSW_FCTL_HALT_FUNC 0x2 +#define SCSW_FCTL_START_FUNC 0x4 + +#define SCSW_ACTL_SUSPENDED 0x1 +#define SCSW_ACTL_DEVACT 0x2 +#define SCSW_ACTL_SCHACT 0x4 +#define SCSW_ACTL_CLEAR_PEND 0x8 +#define SCSW_ACTL_HALT_PEND 0x10 +#define SCSW_ACTL_START_PEND 0x20 +#define SCSW_ACTL_RESUME_PEND 0x40 + +#define SCSW_STCTL_STATUS_PEND 0x1 +#define SCSW_STCTL_SEC_STATUS 0x2 +#define SCSW_STCTL_PRIM_STATUS 0x4 +#define SCSW_STCTL_INTER_STATUS 0x8 +#define SCSW_STCTL_ALERT_STATUS 0x10 + +#define DEV_STAT_ATTENTION 0x80 +#define DEV_STAT_STAT_MOD 0x40 +#define DEV_STAT_CU_END 0x20 +#define DEV_STAT_BUSY 0x10 +#define DEV_STAT_CHN_END 0x08 +#define DEV_STAT_DEV_END 0x04 +#define DEV_STAT_UNIT_CHECK 0x02 +#define DEV_STAT_UNIT_EXCEP 0x01 + +#define SCHN_STAT_PCI 0x80 +#define SCHN_STAT_INCORR_LEN 0x40 +#define SCHN_STAT_PROG_CHECK 0x20 +#define SCHN_STAT_PROT_CHECK 0x10 +#define SCHN_STAT_CHN_DATA_CHK 0x08 +#define SCHN_STAT_CHN_CTRL_CHK 0x04 +#define SCHN_STAT_INTF_CTRL_CHK 0x02 +#define SCHN_STAT_CHAIN_CHECK 0x01 + +#define SCSW_SESQ_DEV_NOFCX 3 +#define SCSW_SESQ_PATH_NOFCX 4 + +/* + * architectured values for first sense byte + */ +#define SNS0_CMD_REJECT 0x80 +#define SNS_CMD_REJECT SNS0_CMD_REJECT +#define SNS0_INTERVENTION_REQ 0x40 +#define SNS0_BUS_OUT_CHECK 0x20 +#define SNS0_EQUIPMENT_CHECK 0x10 +#define SNS0_DATA_CHECK 0x08 +#define SNS0_OVERRUN 0x04 +#define SNS0_INCOMPL_DOMAIN 0x01 + +/* + * architectured values for second sense byte + */ +#define SNS1_PERM_ERR 0x80 +#define SNS1_INV_TRACK_FORMAT 0x40 +#define SNS1_EOC 0x20 +#define SNS1_MESSAGE_TO_OPER 0x10 +#define SNS1_NO_REC_FOUND 0x08 +#define SNS1_FILE_PROTECTED 0x04 +#define SNS1_WRITE_INHIBITED 0x02 +#define SNS1_INPRECISE_END 0x01 + +/* + * architectured values for third sense byte + */ +#define SNS2_REQ_INH_WRITE 0x80 +#define SNS2_CORRECTABLE 0x40 +#define SNS2_FIRST_LOG_ERR 0x20 +#define SNS2_ENV_DATA_PRESENT 0x10 +#define SNS2_INPRECISE_END 0x04 + +/* + * architectured values for PPRC errors + */ +#define SNS7_INVALID_ON_SEC 0x0e + +/** + * scsw_is_tm - check for transport mode scsw + * @scsw: pointer to scsw + * + * Return non-zero if the specified scsw is a transport mode scsw, zero + * otherwise. + */ +static inline int scsw_is_tm(union scsw *scsw) +{ + return css_general_characteristics.fcx && (scsw->tm.x == 1); +} + +/** + * scsw_key - return scsw key field + * @scsw: pointer to scsw + * + * Return the value of the key field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.key; + else + return scsw->cmd.key; +} + +/** + * scsw_eswf - return scsw eswf field + * @scsw: pointer to scsw + * + * Return the value of the eswf field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. 
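
As an aside (not part of the patch): the union above works because the command-mode and transport-mode layouts keep the shared fields at the same bit positions, while some fields exist in only one mode. A hedged sketch of the latter, using only accessors from this header; the helper name is hypothetical:

	/* Sketch: the residual byte count exists only in the
	 * command-mode layout, so transport mode yields none. */
	static inline u32 scsw_residual_count(union scsw *scsw)
	{
		return scsw_is_tm(scsw) ? 0 : scsw->cmd.count;
	}
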
+ */ +static inline u32 scsw_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.eswf; + else + return scsw->cmd.eswf; +} + +/** + * scsw_cc - return scsw cc field + * @scsw: pointer to scsw + * + * Return the value of the cc field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cc; + else + return scsw->cmd.cc; +} + +/** + * scsw_ectl - return scsw ectl field + * @scsw: pointer to scsw + * + * Return the value of the ectl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.ectl; + else + return scsw->cmd.ectl; +} + +/** + * scsw_pno - return scsw pno field + * @scsw: pointer to scsw + * + * Return the value of the pno field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.pno; + else + return scsw->cmd.pno; +} + +/** + * scsw_fctl - return scsw fctl field + * @scsw: pointer to scsw + * + * Return the value of the fctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.fctl; + else + return scsw->cmd.fctl; +} + +/** + * scsw_actl - return scsw actl field + * @scsw: pointer to scsw + * + * Return the value of the actl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.actl; + else + return scsw->cmd.actl; +} + +/** + * scsw_stctl - return scsw stctl field + * @scsw: pointer to scsw + * + * Return the value of the stctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.stctl; + else + return scsw->cmd.stctl; +} + +/** + * scsw_dstat - return scsw dstat field + * @scsw: pointer to scsw + * + * Return the value of the dstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.dstat; + else + return scsw->cmd.dstat; +} + +/** + * scsw_cstat - return scsw cstat field + * @scsw: pointer to scsw + * + * Return the value of the cstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cstat; + else + return scsw->cmd.cstat; +} + +/** + * scsw_cmd_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_key(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_sctl - check sctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the sctl field of the specified command mode scsw is + * valid, zero otherwise. 
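
For illustration (not part of the patch): the mode-independent accessors combine naturally with the device-status and sense constants defined earlier. A sketch under the assumption that struct irb from asm/cio.h is in scope, with basic sense data available in its ecw[] array; the helper name is hypothetical:

	/* Sketch: detect a unit check whose first sense byte reports
	 * command reject. The irb/ecw[] layout is an assumption here. */
	static inline int irb_is_cmd_reject(struct irb *irb)
	{
		union scsw *scsw = &irb->scsw;

		return (scsw_dstat(scsw) & DEV_STAT_UNIT_CHECK) &&
		       (irb->ecw[0] & SNS0_CMD_REJECT);
	}
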
+ */ +static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_eswf(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_cmd_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_cc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_cmd_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_fmt(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_pfch - check pfch field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pfch field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_pfch(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_isic - check isic field validity + * @scsw: pointer to scsw + * + * Return non-zero if the isic field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_isic(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_alcc - check alcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the alcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_alcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_ssi - check ssi field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ssi field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_ssi(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_zcc - check zcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the zcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); +} + +/** + * scsw_cmd_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) +{ + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. 
+ */ + if (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; +} + +/** + * scsw_cmd_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_pno(union scsw *scsw) +{ + /* Must indicate at least one I/O function. */ + if (!scsw->cmd.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. */ + if (scsw->cmd.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; +} + +/** + * scsw_cmd_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_dstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} + +/** + * scsw_cmd_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_cstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} + +/** + * scsw_tm_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_key(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_tm_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_eswf(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_tm_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified transport mode scsw is + * valid, zero otherwise. 
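
A usage sketch (not part of the patch): the validity helpers are meant to gate every read of a conditional field, rather than being informational. Assuming linux/errno.h for the error code; the helper name is hypothetical:

	/* Sketch: hand out device status only when it is architecturally
	 * valid, i.e. status pending with a deferred cc other than 3. */
	static inline int scsw_cmd_dstat_checked(union scsw *scsw, u32 *dstat)
	{
		if (!scsw_cmd_is_valid_dstat(scsw))
			return -EINVAL;
		*dstat = scsw_dstat(scsw);
		return 0;
	}
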
+ */ +static inline int scsw_tm_is_valid_cc(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_tm_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fmt(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_x - check x field validity + * @scsw: pointer to scsw + * + * Return non-zero if the x field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_x(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_q - check q field validity + * @scsw: pointer to scsw + * + * Return non-zero if the q field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_q(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_ectl(union scsw *scsw) +{ + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Must have alert status. */ + if (!(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS)) + return 0; + + /* Must be alone or together with primary, secondary or both, + * => no intermediate status. + */ + if (scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) + return 0; + + return 1; +} + +/** + * scsw_tm_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_pno(union scsw *scsw) +{ + /* Must indicate at least one I/O function. */ + if (!scsw->tm.fctl) + return 0; + + /* Must be status pending. */ + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) + return 0; + + /* Can be status pending alone, or with any combination of primary, + * secondary and alert => no intermediate status. + */ + if (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS)) + return 1; + + /* If intermediate, must be suspended. */ + if (scsw->tm.actl & SCSW_ACTL_SUSPENDED) + return 1; + + return 0; +} + +/** + * scsw_tm_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified transport mode scsw is + * valid, zero otherwise. 
+ */ +static inline int scsw_tm_is_valid_dstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} + +/** + * scsw_tm_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_cstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} + +/** + * scsw_tm_is_valid_fcxs - check fcxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fcxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fcxs(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_schxs - check schxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the schxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_schxs(union scsw *scsw) +{ + return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | + SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_PROT_CHECK | + SCHN_STAT_CHN_DATA_CHK)); +} + +/** + * scsw_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_actl(scsw); + else + return scsw_cmd_is_valid_actl(scsw); +} + +/** + * scsw_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cc(scsw); + else + return scsw_cmd_is_valid_cc(scsw); +} + +/** + * scsw_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cstat(scsw); + else + return scsw_cmd_is_valid_cstat(scsw); +} + +/** + * scsw_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_dstat(scsw); + else + return scsw_cmd_is_valid_dstat(scsw); +} + +/** + * scsw_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. 
+ */ +static inline int scsw_is_valid_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_ectl(scsw); + else + return scsw_cmd_is_valid_ectl(scsw); +} + +/** + * scsw_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_eswf(scsw); + else + return scsw_cmd_is_valid_eswf(scsw); +} + +/** + * scsw_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_fctl(scsw); + else + return scsw_cmd_is_valid_fctl(scsw); +} + +/** + * scsw_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_key(scsw); + else + return scsw_cmd_is_valid_key(scsw); +} + +/** + * scsw_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_pno(scsw); + else + return scsw_cmd_is_valid_pno(scsw); +} + +/** + * scsw_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_stctl(scsw); + else + return scsw_cmd_is_valid_stctl(scsw); +} + +/** + * scsw_cmd_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the command mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +static inline int scsw_cmd_is_solicited(union scsw *scsw) +{ + return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} + +/** + * scsw_tm_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +static inline int scsw_tm_is_solicited(union scsw *scsw) +{ + return (scsw->tm.cc != 0) || (scsw->tm.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} + +/** + * scsw_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport or command mode scsw indicates that the + * associated status condition is solicited, zero if it is unsolicited. 
+ */ +static inline int scsw_is_solicited(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_solicited(scsw); + else + return scsw_cmd_is_solicited(scsw); +} + +#endif /* _ASM_S390_SCSW_H_ */ diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h new file mode 100644 index 000000000..71d46f0ba --- /dev/null +++ b/arch/s390/include/asm/seccomp.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_SECCOMP_H +#define _ASM_S390_SECCOMP_H + +#include + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_sigreturn + +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#define __NR_seccomp_sigreturn_32 __NR_sigreturn + +#include + +#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_S390X +#define SECCOMP_ARCH_NATIVE_NR NR_syscalls +#define SECCOMP_ARCH_NATIVE_NAME "s390x" +#ifdef CONFIG_COMPAT +# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_S390 +# define SECCOMP_ARCH_COMPAT_NR NR_syscalls +# define SECCOMP_ARCH_COMPAT_NAME "s390" +#endif + +#endif /* _ASM_S390_SECCOMP_H */ diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h new file mode 100644 index 000000000..0486e6ef6 --- /dev/null +++ b/arch/s390/include/asm/sections.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S390_SECTIONS_H +#define _S390_SECTIONS_H + +#include + +/* + * .boot.data section contains variables "shared" between the decompressor and + * the decompressed kernel. The decompressor will store values in them, and + * copy over to the decompressed image before starting it. + * + * Each variable end up in its own intermediate section .boot.data., + * those sections are later sorted by alignment + name and merged together into + * final .boot.data section, which should be identical in the decompressor and + * the decompressed kernel (that is checked during the build). + */ +#define __bootdata(var) __section(".boot.data." #var) var + +/* + * .boot.preserved.data is similar to .boot.data, but it is not part of the + * .init section and thus will be preserved for later use in the decompressed + * kernel. + */ +#define __bootdata_preserved(var) __section(".boot.preserved.data." 
#var) var + +extern char *__samode31, *__eamode31; +extern char *__stext_amode31, *__etext_amode31; + +#endif diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h new file mode 100644 index 000000000..06fbabe2f --- /dev/null +++ b/arch/s390/include/asm/set_memory.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASMS390_SET_MEMORY_H +#define _ASMS390_SET_MEMORY_H + +#include + +extern struct mutex cpa_mutex; + +enum { + _SET_MEMORY_RO_BIT, + _SET_MEMORY_RW_BIT, + _SET_MEMORY_NX_BIT, + _SET_MEMORY_X_BIT, + _SET_MEMORY_4K_BIT, + _SET_MEMORY_INV_BIT, + _SET_MEMORY_DEF_BIT, +}; + +#define SET_MEMORY_RO BIT(_SET_MEMORY_RO_BIT) +#define SET_MEMORY_RW BIT(_SET_MEMORY_RW_BIT) +#define SET_MEMORY_NX BIT(_SET_MEMORY_NX_BIT) +#define SET_MEMORY_X BIT(_SET_MEMORY_X_BIT) +#define SET_MEMORY_4K BIT(_SET_MEMORY_4K_BIT) +#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT) +#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT) + +int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags); + +#define set_memory_rox set_memory_rox + +/* + * Generate two variants of each set_memory() function: + * + * set_memory_yy(unsigned long addr, int numpages); + * __set_memory_yy(void *start, void *end); + * + * The second variant exists for both convenience to avoid the usual + * (unsigned long) casts, but unlike the first variant it can also be used + * for areas larger than 8TB, which may happen at memory initialization. + */ +#define __SET_MEMORY_FUNC(fname, flags) \ +static inline int fname(unsigned long addr, int numpages) \ +{ \ + return __set_memory(addr, numpages, (flags)); \ +} \ + \ +static inline int __##fname(void *start, void *end) \ +{ \ + unsigned long numpages; \ + \ + numpages = (end - start) >> PAGE_SHIFT; \ + return __set_memory((unsigned long)start, numpages, (flags)); \ +} + +__SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO) +__SET_MEMORY_FUNC(set_memory_rw, SET_MEMORY_RW) +__SET_MEMORY_FUNC(set_memory_nx, SET_MEMORY_NX) +__SET_MEMORY_FUNC(set_memory_x, SET_MEMORY_X) +__SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X) +__SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX) +__SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K) + +int set_direct_map_invalid_noflush(struct page *page); +int set_direct_map_default_noflush(struct page *page); + +#endif diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h new file mode 100644 index 000000000..25cadc2b9 --- /dev/null +++ b/arch/s390/include/asm/setup.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 
1999, 2017 + */ +#ifndef _ASM_S390_SETUP_H +#define _ASM_S390_SETUP_H + +#include +#include +#include + +#define PARMAREA 0x10400 + +#define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE +/* + * Machine features detected in early.c + */ + +#define MACHINE_FLAG_VM BIT(0) +#define MACHINE_FLAG_KVM BIT(1) +#define MACHINE_FLAG_LPAR BIT(2) +#define MACHINE_FLAG_DIAG9C BIT(3) +#define MACHINE_FLAG_ESOP BIT(4) +#define MACHINE_FLAG_IDTE BIT(5) +#define MACHINE_FLAG_EDAT1 BIT(7) +#define MACHINE_FLAG_EDAT2 BIT(8) +#define MACHINE_FLAG_TOPOLOGY BIT(10) +#define MACHINE_FLAG_TE BIT(11) +#define MACHINE_FLAG_TLB_LC BIT(12) +#define MACHINE_FLAG_VX BIT(13) +#define MACHINE_FLAG_TLB_GUEST BIT(14) +#define MACHINE_FLAG_NX BIT(15) +#define MACHINE_FLAG_GS BIT(16) +#define MACHINE_FLAG_SCC BIT(17) +#define MACHINE_FLAG_PCI_MIO BIT(18) +#define MACHINE_FLAG_RDP BIT(19) + +#define LPP_MAGIC BIT(31) +#define LPP_PID_MASK _AC(0xffffffff, UL) + +/* Offsets to entry points in kernel/head.S */ + +#define STARTUP_NORMAL_OFFSET 0x10000 +#define STARTUP_KDUMP_OFFSET 0x10010 + +#define LEGACY_COMMAND_LINE_SIZE 896 + +#ifndef __ASSEMBLY__ + +#include +#include + +struct parmarea { + unsigned long ipl_device; /* 0x10400 */ + unsigned long initrd_start; /* 0x10408 */ + unsigned long initrd_size; /* 0x10410 */ + unsigned long oldmem_base; /* 0x10418 */ + unsigned long oldmem_size; /* 0x10420 */ + unsigned long kernel_version; /* 0x10428 */ + unsigned long max_command_line_size; /* 0x10430 */ + char pad1[0x10480-0x10438]; /* 0x10438 - 0x10480 */ + char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */ +}; + +extern struct parmarea parmarea; + +extern unsigned int zlib_dfltcc_support; +#define ZLIB_DFLTCC_DISABLED 0 +#define ZLIB_DFLTCC_FULL 1 +#define ZLIB_DFLTCC_DEFLATE_ONLY 2 +#define ZLIB_DFLTCC_INFLATE_ONLY 3 +#define ZLIB_DFLTCC_FULL_DEBUG 4 + +extern unsigned long ident_map_size; +extern unsigned long max_mappable; + +/* The Write Back bit position in the physaddr is given by the SLPC PCI */ +extern unsigned long mio_wb_bit_mask; + +#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) +#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) +#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) + +#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) +#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) +#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) +#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) +#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) +#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) +#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) +#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) +#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) +#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST) +#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX) +#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS) +#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC) +#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO) +#define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP) + +/* + * Console mode. 
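
An aside on the MACHINE_IS_*/MACHINE_HAS_* tests above (not part of the patch): they read the machine_flags word cached in the lowcore, so a feature check compiles down to a load and a bit test. A sketch with a hypothetical helper name:

	/* Sketch: cheap enough for hot paths; no facility re-query. */
	static inline bool machine_supports_nx_example(void)
	{
		return MACHINE_HAS_NX != 0;
	}
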
Override with conmode= + */ +extern unsigned int console_mode; +extern unsigned int console_devno; +extern unsigned int console_irq; + +#define CONSOLE_IS_UNDEFINED (console_mode == 0) +#define CONSOLE_IS_SCLP (console_mode == 1) +#define CONSOLE_IS_3215 (console_mode == 2) +#define CONSOLE_IS_3270 (console_mode == 3) +#define CONSOLE_IS_VT220 (console_mode == 4) +#define CONSOLE_IS_HVC (console_mode == 5) +#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0) +#define SET_CONSOLE_3215 do { console_mode = 2; } while (0) +#define SET_CONSOLE_3270 do { console_mode = 3; } while (0) +#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0) +#define SET_CONSOLE_HVC do { console_mode = 5; } while (0) + +#ifdef CONFIG_VMCP +void vmcp_cma_reserve(void); +#else +static inline void vmcp_cma_reserve(void) { } +#endif + +void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault); + +void cmma_init(void); +void cmma_init_nodat(void); + +extern void (*_machine_restart)(char *command); +extern void (*_machine_halt)(void); +extern void (*_machine_power_off)(void); + +extern unsigned long __kaslr_offset; +static inline unsigned long kaslr_offset(void) +{ + return __kaslr_offset; +} + +extern int __kaslr_enabled; +static inline int kaslr_enabled(void) +{ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return __kaslr_enabled; + return 0; +} + +struct oldmem_data { + unsigned long start; + unsigned long size; +}; +extern struct oldmem_data oldmem_data; + +static __always_inline u32 gen_lpswe(unsigned long addr) +{ + BUILD_BUG_ON(addr > 0xfff); + return 0xb2b20000 | addr; +} +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_S390_SETUP_H */ diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h new file mode 100644 index 000000000..7daf4d8b5 --- /dev/null +++ b/arch/s390/include/asm/signal.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * + * Derived from "include/asm-i386/signal.h" + */ +#ifndef _ASMS390_SIGNAL_H +#define _ASMS390_SIGNAL_H + +#include + +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. 
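
Returning to gen_lpswe() from setup.h above (illustration, not part of the patch): it assembles the 4-byte LPSWE instruction, opcode 0xb2b2, with a 12-bit displacement into the lowcore, and the BUILD_BUG_ON rejects out-of-range offsets at compile time. For a hypothetical displacement of 0x200:

	/* Sketch: 0xb2b20000 | 0x200 == 0xb2b20200. */
	static __always_inline u32 example_lpswe_insn(void)
	{
		return gen_lpswe(0x200);
	}
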
*/ +#include +#define _NSIG _SIGCONTEXT_NSIG +#define _NSIG_BPW _SIGCONTEXT_NSIG_BPW +#define _NSIG_WORDS _SIGCONTEXT_NSIG_WORDS + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#define __ARCH_HAS_SA_RESTORER +#endif diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h new file mode 100644 index 000000000..edee63da0 --- /dev/null +++ b/arch/s390/include/asm/sigp.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_ASM_SIGP_H +#define __S390_ASM_SIGP_H + +/* SIGP order codes */ +#define SIGP_SENSE 1 +#define SIGP_EXTERNAL_CALL 2 +#define SIGP_EMERGENCY_SIGNAL 3 +#define SIGP_START 4 +#define SIGP_STOP 5 +#define SIGP_RESTART 6 +#define SIGP_STOP_AND_STORE_STATUS 9 +#define SIGP_INITIAL_CPU_RESET 11 +#define SIGP_CPU_RESET 12 +#define SIGP_SET_PREFIX 13 +#define SIGP_STORE_STATUS_AT_ADDRESS 14 +#define SIGP_SET_ARCHITECTURE 18 +#define SIGP_COND_EMERGENCY_SIGNAL 19 +#define SIGP_SENSE_RUNNING 21 +#define SIGP_SET_MULTI_THREADING 22 +#define SIGP_STORE_ADDITIONAL_STATUS 23 + +/* SIGP condition codes */ +#define SIGP_CC_ORDER_CODE_ACCEPTED 0 +#define SIGP_CC_STATUS_STORED 1 +#define SIGP_CC_BUSY 2 +#define SIGP_CC_NOT_OPERATIONAL 3 + +/* SIGP cpu status bits */ + +#define SIGP_STATUS_INVALID_ORDER 0x00000002UL +#define SIGP_STATUS_CHECK_STOP 0x00000010UL +#define SIGP_STATUS_STOPPED 0x00000040UL +#define SIGP_STATUS_EXT_CALL_PENDING 0x00000080UL +#define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL +#define SIGP_STATUS_INCORRECT_STATE 0x00000200UL +#define SIGP_STATUS_NOT_RUNNING 0x00000400UL + +#ifndef __ASSEMBLY__ + +static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm, + u32 *status) +{ + union register_pair r1 = { .odd = parm, }; + int cc; + + asm volatile( + " sigp %[r1],%[addr],0(%[order])\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (cc), [r1] "+&d" (r1.pair) + : [addr] "d" (addr), [order] "a" (order) + : "cc"); + *status = r1.even; + return cc; +} + +static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm, + u32 *status) +{ + u32 _status; + int cc; + + cc = ____pcpu_sigp(addr, order, parm, &_status); + if (status && cc == SIGP_CC_STATUS_STORED) + *status = _status; + return cc; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __S390_ASM_SIGP_H */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h new file mode 100644 index 000000000..73ed27810 --- /dev/null +++ b/arch/s390/include/asm/smp.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
1999, 2012 + * Author(s): Denis Joseph Barrow, + * Martin Schwidefsky , + */ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include +#include +#include + +#define raw_smp_processor_id() (S390_lowcore.cpu_nr) + +extern struct mutex smp_cpu_state_mutex; +extern unsigned int smp_cpu_mt_shift; +extern unsigned int smp_cpu_mtid; +extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS]; +extern cpumask_t cpu_setup_mask; + +extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +extern void smp_call_online_cpu(void (*func)(void *), void *); +extern void smp_call_ipl_cpu(void (*func)(void *), void *); +extern void smp_emergency_stop(void); + +extern int smp_find_processor_id(u16 address); +extern int smp_store_status(int cpu); +extern void smp_save_dump_ipl_cpu(void); +extern void smp_save_dump_secondary_cpus(void); +extern void smp_yield_cpu(int cpu); +extern void smp_cpu_set_polarization(int cpu, int val); +extern int smp_cpu_get_polarization(int cpu); +extern int smp_cpu_get_cpu_address(int cpu); +extern void smp_fill_possible_mask(void); +extern void smp_detect_cpus(void); + +static inline void smp_stop_cpu(void) +{ + u16 pcpu = stap(); + + for (;;) { + __pcpu_sigp(pcpu, SIGP_STOP, 0, NULL); + cpu_relax(); + } +} + +/* Return thread 0 CPU number as base CPU */ +static inline int smp_get_base_cpu(int cpu) +{ + return cpu - (cpu % (smp_cpu_mtid + 1)); +} + +static inline void smp_cpus_done(unsigned int max_cpus) +{ +} + +extern int smp_reinit_ipl_cpu(void); +extern int smp_rescan_cpus(void); +extern void __noreturn cpu_die(void); +extern void __cpu_die(unsigned int cpu); +extern int __cpu_disable(void); +extern void schedule_mcck_handler(void); +void notrace smp_yield_cpu(int cpu); + +#endif /* __ASM_SMP_H */ diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h new file mode 100644 index 000000000..1ac5115d3 --- /dev/null +++ b/arch/s390/include/asm/softirq_stack.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_S390_SOFTIRQ_STACK_H +#define __ASM_S390_SOFTIRQ_STACK_H + +#include +#include + +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK +static inline void do_softirq_own_stack(void) +{ + call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq); +} +#endif +#endif /* __ASM_S390_SOFTIRQ_STACK_H */ diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h new file mode 100644 index 000000000..c54989360 --- /dev/null +++ b/arch/s390/include/asm/sparsemem.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_SPARSEMEM_H +#define _ASM_S390_SPARSEMEM_H + +#define SECTION_SIZE_BITS 28 +#define MAX_PHYSMEM_BITS CONFIG_MAX_PHYSMEM_BITS + +#endif /* _ASM_S390_SPARSEMEM_H */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h new file mode 100644 index 000000000..37127cd77 --- /dev/null +++ b/arch/s390/include/asm/spinlock.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 
1999 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/spinlock.h" + */ + +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#include +#include +#include +#include +#include + +#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval) + +extern int spin_retry; + +bool arch_vcpu_is_preempted(int cpu); + +#define vcpu_is_preempted arch_vcpu_is_preempted + +/* + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * We make no fairness assumptions. They have a cost. + * + * (the type definitions are in asm/spinlock_types.h) + */ + +void arch_spin_relax(arch_spinlock_t *lock); +#define arch_spin_relax arch_spin_relax + +void arch_spin_lock_wait(arch_spinlock_t *); +int arch_spin_trylock_retry(arch_spinlock_t *); +void arch_spin_lock_setup(int cpu); + +static inline u32 arch_spin_lockval(int cpu) +{ + return cpu + 1; +} + +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.lock == 0; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lp) +{ + return READ_ONCE(lp->lock) != 0; +} + +static inline int arch_spin_trylock_once(arch_spinlock_t *lp) +{ + barrier(); + return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL)); +} + +static inline void arch_spin_lock(arch_spinlock_t *lp) +{ + if (!arch_spin_trylock_once(lp)) + arch_spin_lock_wait(lp); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lp) +{ + if (!arch_spin_trylock_once(lp)) + return arch_spin_trylock_retry(lp); + return 1; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lp) +{ + typecheck(int, lp->lock); + kcsan_release(); + asm_inline volatile( + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ + " sth %1,%0\n" + : "=R" (((unsigned short *) &lp->lock)[1]) + : "d" (0) : "cc", "memory"); +} + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. 
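+ *
+ * A sketch of that mix (illustrative only, not part of this header):
+ *
+ *	writer, process context:
+ *		write_lock_irqsave(&lock, flags);
+ *		... update the data ...
+ *		write_unlock_irqrestore(&lock, flags);
+ *
+ *	reader, possibly in interrupt context:
+ *		read_lock(&lock);
+ *		... read the data ...
+ *		read_unlock(&lock);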
+ */ + +#define arch_read_relax(rw) barrier() +#define arch_write_relax(rw) barrier() + +void arch_read_lock_wait(arch_rwlock_t *lp); +void arch_write_lock_wait(arch_rwlock_t *lp); + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + int old; + + old = __atomic_add(1, &rw->cnts); + if (old & 0xffff0000) + arch_read_lock_wait(rw); +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + __atomic_add_const_barrier(-1, &rw->cnts); +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000)) + arch_write_lock_wait(rw); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + __atomic_add_barrier(-0x30000, &rw->cnts); +} + + +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + int old; + + old = READ_ONCE(rw->cnts); + return (!(old & 0xffff0000) && + __atomic_cmpxchg_bool(&rw->cnts, old, old + 1)); +} + +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + int old; + + old = READ_ONCE(rw->cnts); + return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000); +} + +#endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h new file mode 100644 index 000000000..b69695e39 --- /dev/null +++ b/arch/s390/include/asm/spinlock_types.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SPINLOCK_TYPES_H +#define __ASM_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +# error "please don't include this file directly" +#endif + +typedef struct { + int lock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, } + +typedef struct { + int cnts; + arch_spinlock_t wait; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h new file mode 100644 index 000000000..78f7b729b --- /dev/null +++ b/arch/s390/include/asm/stacktrace.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_STACKTRACE_H +#define _ASM_S390_STACKTRACE_H + +#include +#include +#include + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, + STACK_TYPE_IRQ, + STACK_TYPE_NODAT, + STACK_TYPE_RESTART, + STACK_TYPE_MCCK, +}; + +struct stack_info { + enum stack_type type; + unsigned long begin, end; +}; + +const char *stack_type_name(enum stack_type type); +int get_stack_info(unsigned long sp, struct task_struct *task, + struct stack_info *info, unsigned long *visit_mask); + +static inline bool on_stack(struct stack_info *info, + unsigned long addr, size_t len) +{ + if (info->type == STACK_TYPE_UNKNOWN) + return false; + if (addr + len < addr) + return false; + return addr >= info->begin && addr + len <= info->end; +} + +/* + * Stack layout of a C stack frame. + * Kernel uses the packed stack layout (-mpacked-stack). + */ +struct stack_frame { + union { + unsigned long empty[9]; + struct { + unsigned long sie_control_block; + unsigned long sie_savearea; + unsigned long sie_reason; + unsigned long sie_flags; + unsigned long sie_control_block_phys; + }; + }; + unsigned long gprs[10]; + unsigned long back_chain; +}; + +/* + * Unlike current_stack_pointer which simply contains the current value of %r15 + * current_frame_address() returns function stack frame address, which matches + * %r15 upon function invocation. It may differ from %r15 later if function + * allocates stack for local variables or new stack frame to call other + * functions. 
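+ *
+ * For example (hypothetical function, for illustration only):
+ *
+ *	void foo(void)
+ *	{
+ *		unsigned long frame = current_frame_address();
+ *		long buf[32];
+ *		...
+ *	}
+ *
+ * frame names the stack frame established on entry to foo(), even
+ * though %r15 was lowered in the prologue to make room for buf.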
+ */ +#define current_frame_address() \ + ((unsigned long)__builtin_frame_address(0) - \ + offsetof(struct stack_frame, back_chain)) + +static __always_inline unsigned long get_stack_pointer(struct task_struct *task, + struct pt_regs *regs) +{ + if (regs) + return (unsigned long)kernel_stack_pointer(regs); + if (task == current) + return current_frame_address(); + return (unsigned long)task->thread.ksp; +} + +/* + * To keep this simple mark register 2-6 as being changed (volatile) + * by the called function, even though register 6 is saved/nonvolatile. + */ +#define CALL_FMT_0 "=&d" (r2) +#define CALL_FMT_1 "+&d" (r2) +#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3) +#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4) +#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5) +#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6) + +#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory" +#define CALL_CLOBBER_4 CALL_CLOBBER_5 +#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5" +#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4" +#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3" +#define CALL_CLOBBER_0 CALL_CLOBBER_1 + +#define CALL_LARGS_0(...) \ + long dummy = 0 +#define CALL_LARGS_1(t1, a1) \ + long arg1 = (long)(t1)(a1) +#define CALL_LARGS_2(t1, a1, t2, a2) \ + CALL_LARGS_1(t1, a1); \ + long arg2 = (long)(t2)(a2) +#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3) \ + CALL_LARGS_2(t1, a1, t2, a2); \ + long arg3 = (long)(t3)(a3) +#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4) \ + CALL_LARGS_3(t1, a1, t2, a2, t3, a3); \ + long arg4 = (long)(t4)(a4) +#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \ + CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4); \ + long arg5 = (long)(t5)(a5) + +#define CALL_REGS_0 \ + register long r2 asm("2") = dummy +#define CALL_REGS_1 \ + register long r2 asm("2") = arg1 +#define CALL_REGS_2 \ + CALL_REGS_1; \ + register long r3 asm("3") = arg2 +#define CALL_REGS_3 \ + CALL_REGS_2; \ + register long r4 asm("4") = arg3 +#define CALL_REGS_4 \ + CALL_REGS_3; \ + register long r5 asm("5") = arg4 +#define CALL_REGS_5 \ + CALL_REGS_4; \ + register long r6 asm("6") = arg5 + +#define CALL_TYPECHECK_0(...) +#define CALL_TYPECHECK_1(t, a, ...) \ + typecheck(t, a) +#define CALL_TYPECHECK_2(t, a, ...) \ + CALL_TYPECHECK_1(__VA_ARGS__); \ + typecheck(t, a) +#define CALL_TYPECHECK_3(t, a, ...) \ + CALL_TYPECHECK_2(__VA_ARGS__); \ + typecheck(t, a) +#define CALL_TYPECHECK_4(t, a, ...) \ + CALL_TYPECHECK_3(__VA_ARGS__); \ + typecheck(t, a) +#define CALL_TYPECHECK_5(t, a, ...) \ + CALL_TYPECHECK_4(__VA_ARGS__); \ + typecheck(t, a) + +#define CALL_PARM_0(...) void +#define CALL_PARM_1(t, a, ...) t +#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__) +#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__) +#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__) +#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__) +#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__) + +/* + * Use call_on_stack() to call a function switching to a specified + * stack. Proper sign and zero extension of function arguments is + * done. Usage: + * + * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...) + * + * - nr specifies the number of function arguments of fn. + * - stack specifies the stack to be used. + * - fn is the function to be called. + * - rettype is the return type of fn. + * - t1, a1, ... are pairs, where t1 must match the type of the first + * argument of fn, t2 the second, etc. a1 is the corresponding + * first function argument (not name), etc. + */ +#define call_on_stack(nr, stack, rettype, fn, ...) 
\ +({ \ + rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn; \ + unsigned long frame = current_frame_address(); \ + unsigned long __stack = stack; \ + unsigned long prev; \ + CALL_LARGS_##nr(__VA_ARGS__); \ + CALL_REGS_##nr; \ + \ + CALL_TYPECHECK_##nr(__VA_ARGS__); \ + asm volatile( \ + " lgr %[_prev],15\n" \ + " lg 15,%[_stack]\n" \ + " stg %[_frame],%[_bc](15)\n" \ + " brasl 14,%[_fn]\n" \ + " lgr 15,%[_prev]\n" \ + : [_prev] "=&d" (prev), CALL_FMT_##nr \ + : [_stack] "R" (__stack), \ + [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ + [_frame] "d" (frame), \ + [_fn] "X" (__fn) : CALL_CLOBBER_##nr); \ + (rettype)r2; \ +}) + +/* + * Use call_nodat() to call a function with DAT disabled. + * Proper sign and zero extension of function arguments is done. + * Usage: + * + * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...) + * + * - nr specifies the number of function arguments of fn. + * - fn is the function to be called, where fn is a physical address. + * - rettype is the return type of fn. + * - t1, a1, ... are pairs, where t1 must match the type of the first + * argument of fn, t2 the second, etc. a1 is the corresponding + * first function argument (not name), etc. + * + * fn() is called with standard C function call ABI, with the exception + * that no useful stackframe or stackpointer is passed via register 15. + * Therefore the called function must not use r15 to access the stack. + */ +#define call_nodat(nr, rettype, fn, ...) \ +({ \ + rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \ + /* aligned since psw_leave must not cross page boundary */ \ + psw_t __aligned(16) psw_leave; \ + psw_t psw_enter; \ + CALL_LARGS_##nr(__VA_ARGS__); \ + CALL_REGS_##nr; \ + \ + CALL_TYPECHECK_##nr(__VA_ARGS__); \ + psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \ + psw_enter.addr = (unsigned long)__fn; \ + asm volatile( \ + " epsw 0,1\n" \ + " risbg 1,0,0,31,32\n" \ + " larl 7,1f\n" \ + " stg 1,%[psw_leave]\n" \ + " stg 7,8+%[psw_leave]\n" \ + " la 7,%[psw_leave]\n" \ + " lra 7,0(7)\n" \ + " larl 1,0f\n" \ + " lra 14,0(1)\n" \ + " lpswe %[psw_enter]\n" \ + "0: lpswe 0(7)\n" \ + "1:\n" \ + : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \ + : [psw_enter] "Q" (psw_enter) \ + : "7", CALL_CLOBBER_##nr); \ + (rettype)r2; \ +}) + +#endif /* _ASM_S390_STACKTRACE_H */ diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h new file mode 100644 index 000000000..4d74d7e33 --- /dev/null +++ b/arch/s390/include/asm/stp.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2006 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ +#ifndef __S390_STP_H +#define __S390_STP_H + +#include + +/* notifier for syncs */ +extern struct atomic_notifier_head s390_epoch_delta_notifier; + +/* STP interruption parameter */ +struct stp_irq_parm { + u32 : 14; + u32 tsc : 1; /* Timing status change */ + u32 lac : 1; /* Link availability change */ + u32 tcpc : 1; /* Time control parameter change */ + u32 : 15; +} __packed; + +#define STP_OP_SYNC 1 +#define STP_OP_CTRL 3 + +struct stp_sstpi { + u32 : 32; + u32 tu : 1; + u32 lu : 1; + u32 : 6; + u32 stratum : 8; + u32 vbits : 16; + u32 leaps : 16; + u32 tmd : 4; + u32 ctn : 4; + u32 : 3; + u32 c : 1; + u32 tst : 4; + u32 tzo : 16; + u32 dsto : 16; + u32 ctrl : 16; + u32 : 16; + u32 tto; + u32 : 32; + u32 ctnid[3]; + u32 : 32; + u64 todoff; + u32 rsvd[50]; +} __packed; + +struct stp_tzib { + u32 tzan : 16; + u32 : 16; + u32 tzo : 16; + u32 dsto : 16; + u32 stn; + u32 dstn; + u64 dst_on_alg; + u64 dst_off_alg; +} __packed; + +struct stp_tcpib { + u32 atcode : 4; + u32 ntcode : 4; + u32 d : 1; + u32 : 23; + s32 tto; + struct stp_tzib atzib; + struct stp_tzib ntzib; + s32 adst_offset : 16; + s32 ndst_offset : 16; + u32 rsvd1; + u64 ntzib_update; + u64 ndsto_update; +} __packed; + +struct stp_lsoib { + u32 p : 1; + u32 : 31; + s32 also : 16; + s32 nlso : 16; + u64 nlsout; +} __packed; + +struct stp_stzi { + u32 rsvd0[3]; + u64 data_ts; + u32 rsvd1[22]; + struct stp_tcpib tcpib; + struct stp_lsoib lsoib; +} __packed; + +/* Functions needed by the machine check handler */ +int stp_sync_check(void); +int stp_island_check(void); +void stp_queue_work(void); + +#endif /* __S390_STP_H */ diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h new file mode 100644 index 000000000..351685de5 --- /dev/null +++ b/arch/s390/include/asm/string.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +#ifndef _S390_STRING_H_ +#define _S390_STRING_H_ + +#ifndef _LINUX_TYPES_H +#include +#endif + +#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */ +#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */ +#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */ +#define __HAVE_ARCH_MEMSET16 /* arch function */ +#define __HAVE_ARCH_MEMSET32 /* arch function */ +#define __HAVE_ARCH_MEMSET64 /* arch function */ + +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); +void *memmove(void *dest, const void *src, size_t n); + +#ifndef CONFIG_KASAN +#define __HAVE_ARCH_MEMCHR /* inline & arch function */ +#define __HAVE_ARCH_MEMCMP /* arch function */ +#define __HAVE_ARCH_MEMSCAN /* inline & arch function */ +#define __HAVE_ARCH_STRCAT /* inline & arch function */ +#define __HAVE_ARCH_STRCMP /* arch function */ +#define __HAVE_ARCH_STRCPY /* inline & arch function */ +#define __HAVE_ARCH_STRLCAT /* arch function */ +#define __HAVE_ARCH_STRLEN /* inline & arch function */ +#define __HAVE_ARCH_STRNCAT /* arch function */ +#define __HAVE_ARCH_STRNCPY /* arch function */ +#define __HAVE_ARCH_STRNLEN /* inline & arch function */ +#define __HAVE_ARCH_STRSTR /* arch function */ + +/* Prototypes for non-inlined arch strings functions. 
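+   These are only provided with CONFIG_KASAN disabled; a KASAN kernel
+   falls back to the instrumented generic string implementations.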
*/ +int memcmp(const void *s1, const void *s2, size_t n); +int strcmp(const char *s1, const char *s2); +size_t strlcat(char *dest, const char *src, size_t n); +char *strncat(char *dest, const char *src, size_t n); +char *strncpy(char *dest, const char *src, size_t n); +char *strstr(const char *s1, const char *s2); +#endif /* !CONFIG_KASAN */ + +#undef __HAVE_ARCH_STRCHR +#undef __HAVE_ARCH_STRNCHR +#undef __HAVE_ARCH_STRNCMP +#undef __HAVE_ARCH_STRPBRK +#undef __HAVE_ARCH_STRSEP +#undef __HAVE_ARCH_STRSPN + +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +#define strlen(s) __strlen(s) + +#define __no_sanitize_prefix_strfunc(x) __##x + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + +#else +#define __no_sanitize_prefix_strfunc(x) x +#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */ + +void *__memcpy(void *dest, const void *src, size_t n); +void *__memset(void *s, int c, size_t n); +void *__memmove(void *dest, const void *src, size_t n); +void *__memset16(uint16_t *s, uint16_t v, size_t count); +void *__memset32(uint32_t *s, uint32_t v, size_t count); +void *__memset64(uint64_t *s, uint64_t v, size_t count); + +static inline void *memset16(uint16_t *s, uint16_t v, size_t count) +{ + return __memset16(s, v, count * sizeof(v)); +} + +static inline void *memset32(uint32_t *s, uint32_t v, size_t count) +{ + return __memset32(s, v, count * sizeof(v)); +} + +static inline void *memset64(uint64_t *s, uint64_t v, size_t count) +{ + return __memset64(s, v, count * sizeof(v)); +} + +#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY)) + +#ifdef __HAVE_ARCH_MEMCHR +static inline void *memchr(const void * s, int c, size_t n) +{ + const void *ret = s + n; + + asm volatile( + " lgr 0,%[c]\n" + "0: srst %[ret],%[s]\n" + " jo 0b\n" + " jl 1f\n" + " la %[ret],0\n" + "1:" + : [ret] "+&a" (ret), [s] "+&a" (s) + : [c] "d" (c) + : "cc", "memory", "0"); + return (void *) ret; +} +#endif + +#ifdef __HAVE_ARCH_MEMSCAN +static inline void *memscan(void *s, int c, size_t n) +{ + const void *ret = s + n; + + asm volatile( + " lgr 0,%[c]\n" + "0: srst %[ret],%[s]\n" + " jo 0b\n" + : [ret] "+&a" (ret), [s] "+&a" (s) + : [c] "d" (c) + : "cc", "memory", "0"); + return (void *) ret; +} +#endif + +#ifdef __HAVE_ARCH_STRCAT +static inline char *strcat(char *dst, const char *src) +{ + unsigned long dummy = 0; + char *ret = dst; + + asm volatile( + " lghi 0,0\n" + "0: srst %[dummy],%[dst]\n" + " jo 0b\n" + "1: mvst %[dummy],%[src]\n" + " jo 1b" + : [dummy] "+&a" (dummy), [dst] "+&a" (dst), [src] "+&a" (src) + : + : "cc", "memory", "0"); + return ret; +} +#endif + +#ifdef __HAVE_ARCH_STRCPY +static inline char *strcpy(char *dst, const char *src) +{ + char *ret = dst; + + asm volatile( + " lghi 0,0\n" + "0: mvst %[dst],%[src]\n" + " jo 0b" + : [dst] "+&a" (dst), [src] "+&a" (src) + : + : "cc", "memory", "0"); + return ret; +} +#endif + +#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) +static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) +{ + unsigned long end = 0; + const char *tmp = s; + + asm volatile( + " lghi 0,0\n" + "0: srst %[end],%[tmp]\n" + " jo 0b" + : [end] "+&a" (end), [tmp] "+&a" (tmp) + : + : "cc", "memory", "0"); + return end - (unsigned long)s; +} +#endif + +#ifdef __HAVE_ARCH_STRNLEN +static inline size_t strnlen(const char * s, size_t n) +{ + const char *tmp = s; + const char *end = s + n; + + asm volatile( + " lghi 
0,0\n" + "0: srst %[end],%[tmp]\n" + " jo 0b" + : [end] "+&a" (end), [tmp] "+&a" (tmp) + : + : "cc", "memory", "0"); + return end - s; +} +#endif +#else /* IN_ARCH_STRING_C */ +void *memchr(const void * s, int c, size_t n); +void *memscan(void *s, int c, size_t n); +char *strcat(char *dst, const char *src); +char *strcpy(char *dst, const char *src); +size_t strlen(const char *s); +size_t strnlen(const char * s, size_t n); +#endif /* !IN_ARCH_STRING_C */ + +#endif /* __S390_STRING_H_ */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h new file mode 100644 index 000000000..c61b2cc1a --- /dev/null +++ b/arch/s390/include/asm/switch_to.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky + */ + +#ifndef __ASM_SWITCH_TO_H +#define __ASM_SWITCH_TO_H + +#include +#include +#include +#include + +extern struct task_struct *__switch_to(void *, void *); +extern void update_cr_regs(struct task_struct *task); + +static inline void save_access_regs(unsigned int *acrs) +{ + typedef struct { int _[NUM_ACRS]; } acrstype; + + asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs)); +} + +static inline void restore_access_regs(unsigned int *acrs) +{ + typedef struct { int _[NUM_ACRS]; } acrstype; + + asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); +} + +#define switch_to(prev, next, last) do { \ + /* save_fpu_regs() sets the CIF_FPU flag, which enforces \ + * a restore of the floating point / vector registers as \ + * soon as the next task returns to user space \ + */ \ + save_fpu_regs(); \ + save_access_regs(&prev->thread.acrs[0]); \ + save_ri_cb(prev->thread.ri_cb); \ + save_gs_cb(prev->thread.gs_cb); \ + update_cr_regs(next); \ + restore_access_regs(&next->thread.acrs[0]); \ + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ + restore_gs_cb(next->thread.gs_cb); \ + prev = __switch_to(prev, next); \ +} while (0) + +#endif /* __ASM_SWITCH_TO_H */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h new file mode 100644 index 000000000..27e3d804b --- /dev/null +++ b/arch/s390/include/asm/syscall.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Access to user system call parameters and results + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#ifndef _ASM_SYSCALL_H +#define _ASM_SYSCALL_H 1 + +#include +#include +#include +#include + +extern const sys_call_ptr_t sys_call_table[]; +extern const sys_call_ptr_t sys_call_table_emu[]; + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return test_pt_regs_flag(regs, PIF_SYSCALL) ? + (regs->int_code & 0xffff) : -1; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->gprs[2] = regs->orig_gpr2; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->gprs[2]; +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) { + /* + * Sign-extend the value so (int)-EFOO becomes (long)-EFOO + * and will match correctly in comparisons. + */ + error = (long)(int)error; + } +#endif + return IS_ERR_VALUE(error) ? 
error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->gprs[2]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + set_pt_regs_flag(regs, PIF_SYSCALL_RET_SET); + regs->gprs[2] = error ? error : val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + unsigned long mask = -1UL; + unsigned int n = 6; + +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + mask = 0xffffffff; +#endif + while (n-- > 0) + if (n > 0) + args[n] = regs->gprs[2 + n] & mask; + + args[0] = regs->orig_gpr2 & mask; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + return AUDIT_ARCH_S390; +#endif + return AUDIT_ARCH_S390X; +} + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + +#define SYSCALL_FMT_0 +#define SYSCALL_FMT_1 , "0" (r2) +#define SYSCALL_FMT_2 , "d" (r3) SYSCALL_FMT_1 +#define SYSCALL_FMT_3 , "d" (r4) SYSCALL_FMT_2 +#define SYSCALL_FMT_4 , "d" (r5) SYSCALL_FMT_3 +#define SYSCALL_FMT_5 , "d" (r6) SYSCALL_FMT_4 +#define SYSCALL_FMT_6 , "d" (r7) SYSCALL_FMT_5 + +#define SYSCALL_PARM_0 +#define SYSCALL_PARM_1 , long arg1 +#define SYSCALL_PARM_2 SYSCALL_PARM_1, long arg2 +#define SYSCALL_PARM_3 SYSCALL_PARM_2, long arg3 +#define SYSCALL_PARM_4 SYSCALL_PARM_3, long arg4 +#define SYSCALL_PARM_5 SYSCALL_PARM_4, long arg5 +#define SYSCALL_PARM_6 SYSCALL_PARM_5, long arg6 + +#define SYSCALL_REGS_0 +#define SYSCALL_REGS_1 \ + register long r2 asm("2") = arg1 +#define SYSCALL_REGS_2 \ + SYSCALL_REGS_1; \ + register long r3 asm("3") = arg2 +#define SYSCALL_REGS_3 \ + SYSCALL_REGS_2; \ + register long r4 asm("4") = arg3 +#define SYSCALL_REGS_4 \ + SYSCALL_REGS_3; \ + register long r5 asm("5") = arg4 +#define SYSCALL_REGS_5 \ + SYSCALL_REGS_4; \ + register long r6 asm("6") = arg5 +#define SYSCALL_REGS_6 \ + SYSCALL_REGS_5; \ + register long r7 asm("7") = arg6 + +#define GENERATE_SYSCALL_FUNC(nr) \ +static __always_inline \ +long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr) \ +{ \ + register unsigned long r1 asm ("1") = syscall; \ + register long rc asm ("2"); \ + SYSCALL_REGS_##nr; \ + \ + asm volatile ( \ + " svc 0\n" \ + : "=d" (rc) \ + : "d" (r1) SYSCALL_FMT_##nr \ + : "memory"); \ + return rc; \ +} + +GENERATE_SYSCALL_FUNC(0) +GENERATE_SYSCALL_FUNC(1) +GENERATE_SYSCALL_FUNC(2) +GENERATE_SYSCALL_FUNC(3) +GENERATE_SYSCALL_FUNC(4) +GENERATE_SYSCALL_FUNC(5) +GENERATE_SYSCALL_FUNC(6) + +#endif /* _ASM_SYSCALL_H */ diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h new file mode 100644 index 000000000..35c1d1b86 --- /dev/null +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - s390 specific wrappers to syscall definitions + * + */ + +#ifndef _ASM_S390_SYSCALL_WRAPPER_H +#define _ASM_S390_SYSCALL_WRAPPER_H + +/* Mapping of registers to parameters for syscalls */ +#define SC_S390_REGS_TO_ARGS(x, ...) 
\ + __MAP(x, __SC_ARGS \ + ,, regs->orig_gpr2,, regs->gprs[3],, regs->gprs[4] \ + ,, regs->gprs[5],, regs->gprs[6],, regs->gprs[7]) + +#ifdef CONFIG_COMPAT + +#define __SC_COMPAT_CAST(t, a) \ +({ \ + long __ReS = a; \ + \ + BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ + !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) && \ + !__TYPE_IS_LL(t)); \ + if (__TYPE_IS_L(t)) \ + __ReS = (s32)a; \ + if (__TYPE_IS_UL(t)) \ + __ReS = (u32)a; \ + if (__TYPE_IS_PTR(t)) \ + __ReS = a & 0x7fffffff; \ + if (__TYPE_IS_LL(t)) \ + return -ENOSYS; \ + (t)__ReS; \ +}) + +/* + * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias + * named __s390x_sys_*() + */ +#define COMPAT_SYSCALL_DEFINE0(sname) \ + long __s390_compat_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \ + long __s390_compat_sys_##sname(void) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + long __s390_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390_sys_##sname, ERRNO); \ + long __s390x_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ + static inline long __do_sys_##sname(void); \ + long __s390_sys_##sname(void) \ + { \ + return __do_sys_##sname(); \ + } \ + long __s390x_sys_##sname(void) \ + { \ + return __do_sys_##sname(); \ + } \ + static inline long __do_sys_##sname(void) + +#define COND_SYSCALL(name) \ + cond_syscall(__s390x_sys_##name); \ + cond_syscall(__s390_sys_##name) + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + long __s390_compat_sys##name(struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ + static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ + static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \ + long __s390_compat_sys##name(struct pt_regs *regs) \ + { \ + return __se_compat_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ + } \ + static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ + { \ + __MAP(x, __SC_TEST, __VA_ARGS__); \ + return __do_compat_sys##name(__MAP(x, __SC_DELOUSE, __VA_ARGS__)); \ + } \ + static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)) + +/* + * As some compat syscalls may not be implemented, we need to expand + * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well. + */ +#define COND_SYSCALL_COMPAT(name) \ + cond_syscall(__s390_compat_sys_##name) + +#define __S390_SYS_STUBx(x, name, ...) \ + long __s390_sys##name(struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ + static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ + long __s390_sys##name(struct pt_regs *regs) \ + { \ + return ___se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ + } \ + static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ + { \ + __MAP(x, __SC_TEST, __VA_ARGS__); \ + return __do_sys##name(__MAP(x, __SC_COMPAT_CAST, __VA_ARGS__)); \ + } + +#else /* CONFIG_COMPAT */ + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + long __s390x_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ + static inline long __do_sys_##sname(void); \ + long __s390x_sys_##sname(void) \ + { \ + return __do_sys_##sname(); \ + } \ + static inline long __do_sys_##sname(void) + +#define COND_SYSCALL(name) \ + cond_syscall(__s390x_sys_##name) + +#define __S390_SYS_STUBx(x, fullname, name, ...) + +#endif /* CONFIG_COMPAT */ + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + long __s390x_sys##name(struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \ + static inline long __se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \ + __S390_SYS_STUBx(x, name, __VA_ARGS__); \ + long __s390x_sys##name(struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ + } \ + static inline long __se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ + { \ + __MAP(x, __SC_TEST, __VA_ARGS__); \ + return __do_sys##name(__MAP(x, __SC_CAST, __VA_ARGS__)); \ + } \ + static inline long __do_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)) + +#endif /* _ASM_S390_SYSCALL_WRAPPER_H */ diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h new file mode 100644 index 000000000..ab1c63160 --- /dev/null +++ b/arch/s390/include/asm/sysinfo.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * definition for store system information stsi + * + * Copyright IBM Corp. 2001, 2008 + * + * Author(s): Ulrich Weigand + * Christian Borntraeger + */ + +#ifndef __ASM_S390_SYSINFO_H +#define __ASM_S390_SYSINFO_H + +#include +#include + +struct sysinfo_1_1_1 { + unsigned char p:1; + unsigned char :6; + unsigned char t:1; + unsigned char :8; + unsigned char ccr; + unsigned char cai; + char reserved_0[20]; + unsigned long lic; + char manufacturer[16]; + char type[4]; + char reserved_1[12]; + char model_capacity[16]; + char sequence[16]; + char plant[4]; + char model[16]; + char model_perm_cap[16]; + char model_temp_cap[16]; + unsigned int model_cap_rating; + unsigned int model_perm_cap_rating; + unsigned int model_temp_cap_rating; + unsigned char typepct[5]; + unsigned char reserved_2[3]; + unsigned int ncr; + unsigned int npr; + unsigned int ntr; +}; + +struct sysinfo_1_2_1 { + char reserved_0[80]; + char sequence[16]; + char plant[4]; + char reserved_1[2]; + unsigned short cpu_address; +}; + +struct sysinfo_1_2_2 { + char format; + char reserved_0[1]; + unsigned short acc_offset; + unsigned char mt_installed :1; + unsigned char :2; + unsigned char mt_stid :5; + unsigned char :3; + unsigned char mt_gtid :5; + char reserved_1[18]; + unsigned int nominal_cap; + unsigned int secondary_cap; + unsigned int capability; + unsigned short cpus_total; + unsigned short cpus_configured; + unsigned short cpus_standby; + unsigned short cpus_reserved; + unsigned short adjustment[]; +}; + +struct sysinfo_1_2_2_extension { + unsigned int alt_capability; + unsigned short alt_adjustment[]; +}; + +struct sysinfo_2_2_1 { + char reserved_0[80]; + char sequence[16]; + char plant[4]; + unsigned short cpu_id; + unsigned short cpu_address; +}; + +struct sysinfo_2_2_2 { + char reserved_0[32]; + unsigned short lpar_number; + char reserved_1; + unsigned char characteristics; + unsigned short cpus_total; + unsigned short cpus_configured; + unsigned short cpus_standby; + unsigned short cpus_reserved; + char name[8]; + unsigned int caf; + char reserved_2[8]; + unsigned char mt_installed :1; + unsigned char :2; + unsigned char mt_stid :5; + unsigned char :3; + unsigned char mt_gtid :5; + unsigned char :3; + unsigned char mt_psmtid :5; + char reserved_3[5]; + unsigned short cpus_dedicated; + unsigned short cpus_shared; + char reserved_4[3]; + unsigned char vsne; + uuid_t uuid; + char reserved_5[160]; + char ext_name[256]; +}; + +#define LPAR_CHAR_DEDICATED (1 << 7) +#define LPAR_CHAR_SHARED (1 << 6) +#define LPAR_CHAR_LIMITED (1 << 5) + +struct sysinfo_3_2_2 { + char 
reserved_0[31]; + unsigned char :4; + unsigned char count:4; + struct { + char reserved_0[4]; + unsigned short cpus_total; + unsigned short cpus_configured; + unsigned short cpus_standby; + unsigned short cpus_reserved; + char name[8]; + unsigned int caf; + char cpi[16]; + char reserved_1[3]; + unsigned char evmne; + unsigned int reserved_2; + uuid_t uuid; + } vm[8]; + char reserved_3[1504]; + char ext_names[8][256]; +}; + +extern int topology_max_mnest; + +/* + * Returns the maximum nesting level supported by the cpu topology code. + * The current maximum level is 4 which is the drawer level. + */ +static inline unsigned char topology_mnest_limit(void) +{ + return min(topology_max_mnest, 4); +} + +#define TOPOLOGY_NR_MAG 6 + +struct topology_core { + unsigned char nl; + unsigned char reserved0[3]; + unsigned char :5; + unsigned char d:1; + unsigned char pp:2; + unsigned char reserved1; + unsigned short origin; + unsigned long mask; +}; + +struct topology_container { + unsigned char nl; + unsigned char reserved[6]; + unsigned char id; +}; + +union topology_entry { + unsigned char nl; + struct topology_core cpu; + struct topology_container container; +}; + +struct sysinfo_15_1_x { + unsigned char reserved0[2]; + unsigned short length; + unsigned char mag[TOPOLOGY_NR_MAG]; + unsigned char reserved1; + unsigned char mnest; + unsigned char reserved2[4]; + union topology_entry tle[]; +}; + +int stsi(void *sysinfo, int fc, int sel1, int sel2); + +/* + * Service level reporting interface. + */ +struct service_level { + struct list_head list; + void (*seq_print)(struct seq_file *, struct service_level *); +}; + +int register_service_level(struct service_level *); +int unregister_service_level(struct service_level *); + +int sthyi_fill(void *dst, u64 *rc); +#endif /* __ASM_S390_SYSINFO_H */ diff --git a/arch/s390/include/asm/text-patching.h b/arch/s390/include/asm/text-patching.h new file mode 100644 index 000000000..b219056a8 --- /dev/null +++ b/arch/s390/include/asm/text-patching.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_TEXT_PATCHING_H +#define _ASM_S390_TEXT_PATCHING_H + +#include + +static __always_inline void sync_core(void) +{ + bcr_serialize(); +} + +void text_poke_sync(void); +void text_poke_sync_lock(void); + +#endif /* _ASM_S390_TEXT_PATCHING_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h new file mode 100644 index 000000000..a674c7d25 --- /dev/null +++ b/arch/s390/include/asm/thread_info.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 
2002, 2006 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#include +#ifndef ASM_OFFSETS_C +#include +#endif + +/* + * General size of kernel stacks + */ +#ifdef CONFIG_KASAN +#define THREAD_SIZE_ORDER 4 +#else +#define THREAD_SIZE_ORDER 2 +#endif +#define BOOT_STACK_SIZE (PAGE_SIZE << 2) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + +#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) + +#ifndef __ASSEMBLY__ +#include +#include + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants must also be changed + */ +struct thread_info { + unsigned long flags; /* low level flags */ + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ + unsigned int cpu; /* current CPU */ +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .flags = 0, \ +} + +struct task_struct; + +void arch_setup_new_exec(void); +#define arch_setup_new_exec arch_setup_new_exec + +#endif + +/* + * thread information flags bit numbers + */ +/* _TIF_WORK bits */ +#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_UPROBE 3 /* breakpointed or single-stepping */ +#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ +#define TIF_PATCH_PENDING 5 /* pending live patching update */ +#define TIF_PGSTE 6 /* New mm's will use 4K page tables */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ +#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ + +#define TIF_31BIT 16 /* 32bit process */ +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ +#define TIF_SINGLE_STEP 19 /* This task is single stepped */ +#define TIF_BLOCK_STEP 20 /* This task is block stepped */ +#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ + +/* _TIF_TRACE bits */ +#define TIF_SYSCALL_TRACE 24 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */ +#define TIF_SECCOMP 26 /* secure computing */ +#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ + +#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) +#define _TIF_SIGPENDING BIT(TIF_SIGPENDING) +#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) +#define _TIF_UPROBE BIT(TIF_UPROBE) +#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) +#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) +#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) +#define _TIF_PER_TRAP BIT(TIF_PER_TRAP) + +#define _TIF_31BIT BIT(TIF_31BIT) +#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) + +#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP BIT(TIF_SECCOMP) +#define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT) + +#endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h new file mode 100644 index 000000000..4d646659a --- /dev/null +++ b/arch/s390/include/asm/timex.h @@ -0,0 
+1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999 + * + * Derived from "include/asm-i386/timex.h" + * Copyright (C) 1992, Linus Torvalds + */ + +#ifndef _ASM_S390_TIMEX_H +#define _ASM_S390_TIMEX_H + +#include +#include +#include + +/* The value of the TOD clock for 1.1.1970. */ +#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL + +extern u64 clock_comparator_max; + +union tod_clock { + __uint128_t val; + struct { + __uint128_t ei : 8; /* epoch index */ + __uint128_t tod : 64; /* bits 0-63 of tod clock */ + __uint128_t : 40; + __uint128_t pf : 16; /* programmable field */ + }; + struct { + __uint128_t eitod : 72; /* epoch index + bits 0-63 tod clock */ + __uint128_t : 56; + }; + struct { + __uint128_t us : 60; /* micro-seconds */ + __uint128_t sus : 12; /* sub-microseconds */ + __uint128_t : 56; + }; +} __packed; + +/* Inline functions for clock register access. */ +static inline int set_tod_clock(__u64 time) +{ + int cc; + + asm volatile( + " sck %1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) : "Q" (time) : "cc"); + return cc; +} + +static inline int store_tod_clock_ext_cc(union tod_clock *clk) +{ + int cc; + + asm volatile( + " stcke %1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc), "=Q" (*clk) : : "cc"); + return cc; +} + +static __always_inline void store_tod_clock_ext(union tod_clock *tod) +{ + asm volatile("stcke %0" : "=Q" (*tod) : : "cc"); +} + +static inline void set_clock_comparator(__u64 time) +{ + asm volatile("sckc %0" : : "Q" (time)); +} + +static inline void set_tod_programmable_field(u16 val) +{ + asm volatile( + " lgr 0,%[val]\n" + " sckpf\n" + : + : [val] "d" ((unsigned long)val) + : "0"); +} + +void clock_comparator_work(void); + +void __init time_early_init(void); + +extern unsigned char ptff_function_mask[16]; + +/* Function codes for the ptff instruction. 
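+   Whether a function is available can be tested with ptff_query()
+   below before issuing it via the ptff() macro, e.g. (a sketch):
+
+	struct ptff_qto qto;
+
+	if (ptff_query(PTFF_QTO) &&
+	    ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+		... qto.tod_offset is valid here ...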
*/ +#define PTFF_QAF 0x00 /* query available functions */ +#define PTFF_QTO 0x01 /* query tod offset */ +#define PTFF_QSI 0x02 /* query steering information */ +#define PTFF_QUI 0x04 /* query UTC information */ +#define PTFF_ATO 0x40 /* adjust tod offset */ +#define PTFF_STO 0x41 /* set tod offset */ +#define PTFF_SFS 0x42 /* set fine steering rate */ +#define PTFF_SGS 0x43 /* set gross steering rate */ + +/* Query TOD offset result */ +struct ptff_qto { + unsigned long physical_clock; + unsigned long tod_offset; + unsigned long logical_tod_offset; + unsigned long tod_epoch_difference; +} __packed; + +static inline int ptff_query(unsigned int nr) +{ + unsigned char *ptr; + + ptr = ptff_function_mask + (nr >> 3); + return (*ptr & (0x80 >> (nr & 7))) != 0; +} + +/* Query UTC information result */ +struct ptff_qui { + unsigned int tm : 2; + unsigned int ts : 2; + unsigned int : 28; + unsigned int pad_0x04; + unsigned long leap_event; + short old_leap; + short new_leap; + unsigned int pad_0x14; + unsigned long prt[5]; + unsigned long cst[3]; + unsigned int skew; + unsigned int pad_0x5c[41]; +} __packed; + +/* + * ptff - Perform timing facility function + * @ptff_block: Pointer to ptff parameter block + * @len: Length of parameter block + * @func: Function code + * Returns: Condition code (0 on success) + */ +#define ptff(ptff_block, len, func) \ +({ \ + struct addrtype { char _[len]; }; \ + unsigned int reg0 = func; \ + unsigned long reg1 = (unsigned long)(ptff_block); \ + int rc; \ + \ + asm volatile( \ + " lgr 0,%[reg0]\n" \ + " lgr 1,%[reg1]\n" \ + " ptff\n" \ + " ipm %[rc]\n" \ + " srl %[rc],28\n" \ + : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \ + : [reg0] "d" (reg0), [reg1] "d" (reg1) \ + : "cc", "0", "1"); \ + rc; \ +}) + +static inline unsigned long local_tick_disable(void) +{ + unsigned long old; + + old = S390_lowcore.clock_comparator; + S390_lowcore.clock_comparator = clock_comparator_max; + set_clock_comparator(S390_lowcore.clock_comparator); + return old; +} + +static inline void local_tick_enable(unsigned long comp) +{ + S390_lowcore.clock_comparator = comp; + set_clock_comparator(S390_lowcore.clock_comparator); +} + +#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ + +typedef unsigned long cycles_t; + +static __always_inline unsigned long get_tod_clock(void) +{ + union tod_clock clk; + + store_tod_clock_ext(&clk); + return clk.tod; +} + +static inline unsigned long get_tod_clock_fast(void) +{ + unsigned long clk; + + asm volatile("stckf %0" : "=Q" (clk) : : "cc"); + return clk; +} + +static inline cycles_t get_cycles(void) +{ + return (cycles_t) get_tod_clock() >> 2; +} +#define get_cycles get_cycles + +int get_phys_clock(unsigned long *clock); +void init_cpu_timer(void); + +extern union tod_clock tod_clock_base; + +static __always_inline unsigned long __get_tod_clock_monotonic(void) +{ + return get_tod_clock() - tod_clock_base.tod; +} + +/** + * get_clock_monotonic - returns current time in clock rate units + * + * The clock and tod_clock_base get changed via stop_machine. + * Therefore preemption must be disabled, otherwise the returned + * value is not guaranteed to be monotonic. 
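+ *
+ * Illustrative use, together with tod_to_ns() below, to measure an
+ * interval in nanoseconds (a sketch):
+ *
+ *	unsigned long start = get_tod_clock_monotonic();
+ *	...
+ *	delta_ns = tod_to_ns(get_tod_clock_monotonic() - start);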
+ */
+static inline unsigned long get_tod_clock_monotonic(void)
+{
+	unsigned long tod;
+
+	preempt_disable_notrace();
+	tod = __get_tod_clock_monotonic();
+	preempt_enable_notrace();
+	return tod;
+}
+
+/**
+ * tod_to_ns - convert a TOD format value to nanoseconds
+ * @todval: to be converted TOD format value
+ * Returns: number of nanoseconds that correspond to the TOD format value
+ *
+ * Converting a 64 Bit TOD format value to nanoseconds means that the value
+ * must be divided by 4.096. In order to achieve that we multiply with 125
+ * and divide by 512:
+ *
+ *	ns = (todval * 125) >> 9;
+ *
+ * In order to avoid an overflow with the multiplication we can rewrite this.
+ * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
+ * we end up with
+ *
+ *	ns = ((2^9 * th + tl) * 125 ) >> 9;
+ * ->	ns = (th * 125) + ((tl * 125) >> 9);
+ *
+ * For example, 4096 TOD clock units (one microsecond) convert to
+ * 4096 * 125 / 512 = 1000 ns.
+ */
+static __always_inline unsigned long tod_to_ns(unsigned long todval)
+{
+	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
+}
+
+/**
+ * tod_after - compare two 64 bit TOD values
+ * @a: first 64 bit TOD timestamp
+ * @b: second 64 bit TOD timestamp
+ *
+ * Returns: true if a is later than b
+ */
+static inline int tod_after(unsigned long a, unsigned long b)
+{
+	if (MACHINE_HAS_SCC)
+		return (long) a > (long) b;
+	return a > b;
+}
+
+/**
+ * tod_after_eq - compare two 64 bit TOD values
+ * @a: first 64 bit TOD timestamp
+ * @b: second 64 bit TOD timestamp
+ *
+ * Returns: true if a is later than or equal to b
+ */
+static inline int tod_after_eq(unsigned long a, unsigned long b)
+{
+	if (MACHINE_HAS_SCC)
+		return (long) a >= (long) b;
+	return a >= b;
+}
+
+#endif
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
new file mode 100644
index 000000000..383b1f914
--- /dev/null
+++ b/arch/s390/include/asm/tlb.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _S390_TLB_H
+#define _S390_TLB_H
+
+/*
+ * TLB flushing on s390 is complicated. The following requirement
+ * from the principles of operation is the most arduous:
+ *
+ * "A valid table entry must not be changed while it is attached
+ * to any CPU and may be used for translation by that CPU except to
+ * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
+ * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
+ * table entry, or (3) make a change by means of a COMPARE AND SWAP
+ * AND PURGE instruction that purges the TLB."
+ *
+ * The modification of a pte of an active mm struct therefore is
+ * a two step process: i) invalidate the pte, ii) store the new pte.
+ * This is true for the page protection bit as well.
+ * The only possible optimization is to flush at the beginning of
+ * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
+ *
+ * Pages used for the page tables are a different story. FIXME: more
+ */
+
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+		struct encoded_page *page,
+		int page_size);
+
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
+
+#include <asm/tlbflush.h>
+#include <asm-generic/tlb.h>
+
+/*
+ * Release the page cache reference for a pte removed by
+ * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
+ * has already been freed, so just do free_page_and_swap_cache.
+ *
+ * s390 doesn't delay rmap removal, so there is nothing encoded in
+ * the page pointer.
+ */ +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct encoded_page *page, + int page_size) +{ + free_page_and_swap_cache(encoded_page_ptr(page)); + return false; +} + +static inline void tlb_flush(struct mmu_gather *tlb) +{ + __tlb_flush_mm_lazy(tlb->mm); +} + +/* + * pte_free_tlb frees a pte table and clears the CRSTE for the + * page table from the tlb. + */ +static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long address) +{ + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_pmds = 1; + /* + * page_table_free_rcu takes care of the allocation bit masks + * of the 2K table fragments in the 4K page table page, + * then calls tlb_remove_table. + */ + page_table_free_rcu(tlb, (unsigned long *) pte, address); +} + +/* + * pmd_free_tlb frees a pmd table and clears the CRSTE for the + * segment table entry from the tlb. + * If the mm uses a two level page table the single pmd is freed + * as the pgd. pmd_free_tlb checks the asce_limit against 2GB + * to avoid the double free of the pmd in this case. + */ +static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long address) +{ + if (mm_pmd_folded(tlb->mm)) + return; + pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_puds = 1; + tlb_remove_ptdesc(tlb, pmd); +} + +/* + * p4d_free_tlb frees a pud table and clears the CRSTE for the + * region second table entry from the tlb. + * If the mm uses a four level page table the single p4d is freed + * as the pgd. p4d_free_tlb checks the asce_limit against 8PB + * to avoid the double free of the p4d in this case. + */ +static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long address) +{ + if (mm_p4d_folded(tlb->mm)) + return; + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb_remove_table(tlb, p4d); +} + +/* + * pud_free_tlb frees a pud table and clears the CRSTE for the + * region third table entry from the tlb. + * If the mm uses a three level page table the single pud is freed + * as the pgd. pud_free_tlb checks the asce_limit against 4TB + * to avoid the double free of the pud in this case. + */ +static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, + unsigned long address) +{ + if (mm_pud_folded(tlb->mm)) + return; + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_p4ds = 1; + tlb_remove_table(tlb, pud); +} + + +#endif /* _S390_TLB_H */ diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h new file mode 100644 index 000000000..a6e2cd89b --- /dev/null +++ b/arch/s390/include/asm/tlbflush.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S390_TLBFLUSH_H +#define _S390_TLBFLUSH_H + +#include +#include +#include + +/* + * Flush all TLB entries on the local CPU. + */ +static inline void __tlb_flush_local(void) +{ + asm volatile("ptlb" : : : "memory"); +} + +/* + * Flush TLB entries for a specific ASCE on all CPUs + */ +static inline void __tlb_flush_idte(unsigned long asce) +{ + unsigned long opt; + + opt = IDTE_PTOA; + if (MACHINE_HAS_TLB_GUEST) + opt |= IDTE_GUEST_ASCE; + /* Global TLB flush for the mm */ + asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc"); +} + +/* + * Flush all TLB entries on all CPUs. 
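+ * This is done with COMPARE AND SWAP AND PURGE (the csp() call below),
+ * which purges the TLBs of all CPUs in the configuration as a side
+ * effect.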
+ */
+static inline void __tlb_flush_global(void)
+{
+	unsigned int dummy = 0;
+
+	csp(&dummy, 0, 0);
+}
+
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this may involve multiple ASCEs!).
+ */
+static inline void __tlb_flush_mm(struct mm_struct *mm)
+{
+	unsigned long gmap_asce;
+
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
+	preempt_disable();
+	atomic_inc(&mm->context.flush_count);
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
+	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+		if (gmap_asce)
+			__tlb_flush_idte(gmap_asce);
+		__tlb_flush_idte(mm->context.asce);
+	} else {
+		/* Global TLB flush */
+		__tlb_flush_global();
+	}
+	atomic_dec(&mm->context.flush_count);
+	preempt_enable();
+}
+
+static inline void __tlb_flush_kernel(void)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte(init_mm.context.asce);
+	else
+		__tlb_flush_global();
+}
+
+static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
+{
+	spin_lock(&mm->context.lock);
+	if (mm->context.flush_mm) {
+		mm->context.flush_mm = 0;
+		__tlb_flush_mm(mm);
+	}
+	spin_unlock(&mm->context.lock);
+}
+
+/*
+ * TLB flushing:
+ *  flush_tlb() - flushes the current mm struct TLBs
+ *  flush_tlb_all() - flushes all processes' TLBs
+ *  flush_tlb_mm(mm) - flushes the TLBs of the specified mm context
+ *  flush_tlb_page(vma, vmaddr) - flushes one page
+ *  flush_tlb_range(vma, start, end) - flushes a range of pages
+ *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
+ */
+
+/*
+ * flush_tlb_mm goes together with ptep_set_wrprotect for the
+ * copy_page_range operation and flush_tlb_range is related to
+ * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
+ * ptep_get_and_clear do not flush the TLBs directly if the mm has
+ * only one user. At the end of the update the flush_tlb_mm and
+ * flush_tlb_range functions need to do the flush.
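+ *
+ * The resulting pattern is roughly (a sketch):
+ *
+ *	ptep_set_wrprotect(mm, addr, ptep);	(TLB flush may be deferred)
+ *	...
+ *	flush_tlb_mm(mm);			(deferred flush happens here)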
+ */ +#define flush_tlb() do { } while (0) +#define flush_tlb_all() do { } while (0) +#define flush_tlb_page(vma, addr) do { } while (0) + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + __tlb_flush_mm_lazy(mm); +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + __tlb_flush_mm_lazy(vma->vm_mm); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + __tlb_flush_kernel(); +} + +#endif /* _S390_TLBFLUSH_H */ diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h new file mode 100644 index 000000000..3a0ac0c7a --- /dev/null +++ b/arch/s390/include/asm/topology.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_TOPOLOGY_H +#define _ASM_S390_TOPOLOGY_H + +#include +#include + +struct sysinfo_15_1_x; +struct cpu; + +#ifdef CONFIG_SCHED_TOPOLOGY + +struct cpu_topology_s390 { + unsigned short thread_id; + unsigned short core_id; + unsigned short socket_id; + unsigned short book_id; + unsigned short drawer_id; + unsigned short dedicated : 1; + int booted_cores; + cpumask_t thread_mask; + cpumask_t core_mask; + cpumask_t book_mask; + cpumask_t drawer_mask; +}; + +extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) +#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) +#define topology_book_id(cpu) (cpu_topology[cpu].book_id) +#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask) +#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id) +#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask) +#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated) +#define topology_booted_cores(cpu) (cpu_topology[cpu].booted_cores) + +#define mc_capable() 1 + +void topology_init_early(void); +int topology_cpu_init(struct cpu *); +int topology_set_cpu_management(int fc); +void topology_schedule_update(void); +void store_topology(struct sysinfo_15_1_x *info); +void update_cpu_masks(void); +void topology_expect_change(void); +const struct cpumask *cpu_coregroup_mask(int cpu); + +#else /* CONFIG_SCHED_TOPOLOGY */ + +static inline void topology_init_early(void) { } +static inline void topology_schedule_update(void) { } +static inline int topology_cpu_init(struct cpu *cpu) { return 0; } +static inline int topology_cpu_dedicated(int cpu_nr) { return 0; } +static inline int topology_booted_cores(int cpu_nr) { return 1; } +static inline void update_cpu_masks(void) { } +static inline void topology_expect_change(void) { } + +#endif /* CONFIG_SCHED_TOPOLOGY */ + +#define POLARIZATION_UNKNOWN (-1) +#define POLARIZATION_HRZ (0) +#define POLARIZATION_VL (1) +#define POLARIZATION_VM (2) +#define POLARIZATION_VH (3) + +#define SD_BOOK_INIT SD_CPU_INIT + +#ifdef CONFIG_NUMA + +#define cpu_to_node cpu_to_node +static inline int cpu_to_node(int cpu) +{ + return 0; +} + +/* Returns a pointer to the cpumask of CPUs on node 'node'. 
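+   Since s390 presents a single NUMA node (cpu_to_node() above always
+   returns 0), this is simply the mask of all possible CPUs.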
*/ +#define cpumask_of_node cpumask_of_node +static inline const struct cpumask *cpumask_of_node(int node) +{ + return cpu_possible_mask; +} + +#define pcibus_to_node(bus) __pcibus_to_node(bus) + +#else /* !CONFIG_NUMA */ + +#define numa_node_id numa_node_id +static inline int numa_node_id(void) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ + +#include + +#endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h new file mode 100644 index 000000000..f76e5fdff --- /dev/null +++ b/arch/s390/include/asm/tpi.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_S390_TPI_H +#define _ASM_S390_TPI_H + +#include +#include + +#ifndef __ASSEMBLY__ + +/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */ +struct tpi_info { + struct subchannel_id schid; + u32 intparm; + u32 adapter_IO:1; + u32 directed_irq:1; + u32 isc:3; + u32 :12; + u32 type:3; + u32 :12; +} __packed __aligned(4); + +/* I/O-Interruption Code as stored by TPI for an Adapter I/O */ +struct tpi_adapter_info { + u32 aism:8; + u32 :22; + u32 error:1; + u32 forward:1; + u32 reserved; + u32 adapter_IO:1; + u32 directed_irq:1; + u32 isc:3; + u32 :27; +} __packed __aligned(4); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_TPI_H */ diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h new file mode 100644 index 000000000..22fcac4ff --- /dev/null +++ b/arch/s390/include/asm/trace/diag.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Tracepoint header for s390 diagnose calls + * + * Copyright IBM Corp. 2015 + * Author(s): Martin Schwidefsky + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM s390 + +#if !defined(_TRACE_S390_DIAG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_S390_DIAG_H + +#include + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE diag + +TRACE_EVENT(s390_diagnose, + TP_PROTO(unsigned short nr), + TP_ARGS(nr), + TP_STRUCT__entry( + __field(unsigned short, nr) + ), + TP_fast_assign( + __entry->nr = nr; + ), + TP_printk("nr=0x%x", __entry->nr) +); + +#ifdef CONFIG_TRACEPOINTS +void trace_s390_diagnose_norecursion(int diag_nr); +#else +static inline void trace_s390_diagnose_norecursion(int diag_nr) { } +#endif + +#endif /* _TRACE_S390_DIAG_H */ + +/* This part must be outside protection */ +#include diff --git a/arch/s390/include/asm/trace/zcrypt.h b/arch/s390/include/asm/trace/zcrypt.h new file mode 100644 index 000000000..457ddaa99 --- /dev/null +++ b/arch/s390/include/asm/trace/zcrypt.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Tracepoint definitions for the s390 zcrypt device driver + * + * Copyright IBM Corp. 2016 + * Author(s): Harald Freudenberger + * + * Currently there are two tracepoint events defined here. + * An s390_zcrypt_req request event occurs as soon as the request is + * recognized by the zcrypt ioctl function. This event may act as some kind + * of request-processing-starts-now indication. + * As late as possible within the zcrypt ioctl function there occurs the + * s390_zcrypt_rep event which may act as the point in time where the + * request has been processed by the kernel and the result is about to be + * transferred back to userspace. + * The glue which binds together request and reply event is the ptr + * parameter, which is the local buffer address where the request from + * userspace has been stored by the ioctl function. 
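
Stepping back to struct tpi_info above: since struct tpi_adapter_info overlays the same interruption-code layout, an interrupt handler can switch on the adapter_IO bit. A hedged sketch, not part of the upstream diff (example_handle_io_irq() is hypothetical; the fields are the ones defined in asm/tpi.h):

static void example_handle_io_irq(struct tpi_info *tpi)
{
	if (tpi->adapter_IO) {
		struct tpi_adapter_info *ai = (struct tpi_adapter_info *)tpi;

		/* adapter interrupt: route by ai->aism and ai->isc */
		(void)ai;
	} else {
		/* classic I/O interrupt: look up the subchannel via tpi->schid */
	}
}
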
+ * + * The main purpose of this zcrypt tracepoint api is to get some data for + * performance measurements together with information about on which card + * and queue the request has been processed. It is not an ffdc interface as + * there is already code in the zcrypt device driver to serve the s390 + * debug feature interface. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM s390 + +#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_S390_ZCRYPT_H + +#include + +#define TP_ICARSAMODEXPO 0x0001 +#define TP_ICARSACRT 0x0002 +#define TB_ZSECSENDCPRB 0x0003 +#define TP_ZSENDEP11CPRB 0x0004 +#define TP_HWRNGCPRB 0x0005 + +#define show_zcrypt_tp_type(type) \ + __print_symbolic(type, \ + { TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \ + { TP_ICARSACRT, "ICARSACRT" }, \ + { TB_ZSECSENDCPRB, "ZSECSENDCPRB" }, \ + { TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \ + { TP_HWRNGCPRB, "HWRNGCPRB" }) + +/** + * trace_s390_zcrypt_req - zcrypt request tracepoint function + * @ptr: Address of the local buffer where the request from userspace + * is stored. Can be used as a unique id to relate together + * request and reply. + * @type: One of the TP_ defines above. + * + * Called when a request from userspace is recognised within the ioctl + * function of the zcrypt device driver and may act as an entry + * timestamp. + */ +TRACE_EVENT(s390_zcrypt_req, + TP_PROTO(void *ptr, u32 type), + TP_ARGS(ptr, type), + TP_STRUCT__entry( + __field(void *, ptr) + __field(u32, type)), + TP_fast_assign( + __entry->ptr = ptr; + __entry->type = type;), + TP_printk("ptr=%p type=%s", + __entry->ptr, + show_zcrypt_tp_type(__entry->type)) +); + +/** + * trace_s390_zcrypt_rep - zcrypt reply tracepoint function + * @ptr: Address of the local buffer where the request from userspace + * is stored. Can be used as a unique id to match together + * request and reply. + * @fc: Function code. + * @rc: The bare returncode as returned by the device driver ioctl + * function. + * @dev: The adapter nr where this request was actually processed. + * @dom: Domain id of the device where this request was processed. + * + * Called upon recognising the reply from the crypto adapter. This + * message may act as the exit timestamp for the request but also + * carries some info about on which adapter the request was processed + * and the returncode from the device driver. 
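
A sketch of the request/reply pairing described in the header comment, with the local buffer address acting as the correlation id; this is not part of the upstream diff (example_zcrypt_ioctl_path() is hypothetical and the fc/dev/dom values are elided as zero; the two events and the TP_ICARSAMODEXPO constant are the ones defined in this header):

static long example_zcrypt_ioctl_path(void *kbuf)
{
	long rc;

	trace_s390_zcrypt_req(kbuf, TP_ICARSAMODEXPO);	/* entry timestamp */
	rc = 0;		/* ... hand the request to a card/queue, wait ... */
	trace_s390_zcrypt_rep(kbuf, 0, rc, 0, 0);	/* exit timestamp */
	return rc;
}
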
+ */ +TRACE_EVENT(s390_zcrypt_rep, + TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom), + TP_ARGS(ptr, fc, rc, dev, dom), + TP_STRUCT__entry( + __field(void *, ptr) + __field(u32, fc) + __field(u32, rc) + __field(u16, device) + __field(u16, domain)), + TP_fast_assign( + __entry->ptr = ptr; + __entry->fc = fc; + __entry->rc = rc; + __entry->device = dev; + __entry->domain = dom;), + TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx", + __entry->ptr, + (unsigned int) __entry->fc, + (int) __entry->rc, + (unsigned short) __entry->device, + (unsigned short) __entry->domain) +); + +#endif /* _TRACE_S390_ZCRYPT_H */ + +/* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE zcrypt + +#include diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h new file mode 100644 index 000000000..0b5d550a0 --- /dev/null +++ b/arch/s390/include/asm/types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _ASM_S390_TYPES_H +#define _ASM_S390_TYPES_H + +#include + +#ifndef __ASSEMBLY__ + +union register_pair { + unsigned __int128 pair; + struct { + unsigned long even; + unsigned long odd; + }; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_S390_TYPES_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h new file mode 100644 index 000000000..8a8c64a67 --- /dev/null +++ b/arch/s390/include/asm/uaccess.h @@ -0,0 +1,601 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + * Author(s): Hartmut Penner (hp@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "include/asm-i386/uaccess.h" + */ +#ifndef __S390_UACCESS_H +#define __S390_UACCESS_H + +/* + * User space memory access functions + */ +#include +#include +#include +#include +#include +#include + +void debug_user_asce(int exit); + +unsigned long __must_check +raw_copy_from_user(void *to, const void __user *from, unsigned long n); + +unsigned long __must_check +raw_copy_to_user(void __user *to, const void *from, unsigned long n); + +#ifndef CONFIG_KASAN +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER +#endif + +unsigned long __must_check +_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key); + +static __always_inline unsigned long __must_check +copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key) +{ + if (check_copy_size(to, n, false)) + n = _copy_from_user_key(to, from, n, key); + return n; +} + +unsigned long __must_check +_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key); + +static __always_inline unsigned long __must_check +copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key) +{ + if (check_copy_size(from, n, true)) + n = _copy_to_user_key(to, from, n, key); + return n; +} + +union oac { + unsigned int val; + struct { + struct { + unsigned short key : 4; + unsigned short : 4; + unsigned short as : 2; + unsigned short : 4; + unsigned short k : 1; + unsigned short a : 1; + } oac1; + struct { + unsigned short key : 4; + unsigned short : 4; + unsigned short as : 2; + unsigned short : 4; + unsigned short k : 1; + unsigned short a : 1; + } oac2; + }; +}; + +int __noreturn __put_user_bad(void); + +#define __put_user_asm(to, from, size) \ +({ \ + union oac __oac_spec = { \ + .oac1.as = PSW_BITS_AS_SECONDARY, \ + .oac1.a = 1, \ + }; 
\ + int __rc; \ + \ + asm volatile( \ + " lr 0,%[spec]\n" \ + "0: mvcos %[_to],%[_from],%[_size]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ + : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ + : [_size] "d" (size), [_from] "Q" (*(from)), \ + [spec] "d" (__oac_spec.val) \ + : "cc", "0"); \ + __rc; \ +}) + +static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) +{ + int rc; + + switch (size) { + case 1: + rc = __put_user_asm((unsigned char __user *)ptr, + (unsigned char *)x, + size); + break; + case 2: + rc = __put_user_asm((unsigned short __user *)ptr, + (unsigned short *)x, + size); + break; + case 4: + rc = __put_user_asm((unsigned int __user *)ptr, + (unsigned int *)x, + size); + break; + case 8: + rc = __put_user_asm((unsigned long __user *)ptr, + (unsigned long *)x, + size); + break; + default: + __put_user_bad(); + break; + } + return rc; +} + +int __noreturn __get_user_bad(void); + +#define __get_user_asm(to, from, size) \ +({ \ + union oac __oac_spec = { \ + .oac2.as = PSW_BITS_AS_SECONDARY, \ + .oac2.a = 1, \ + }; \ + int __rc; \ + \ + asm volatile( \ + " lr 0,%[spec]\n" \ + "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ + EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ + : [rc] "=&d" (__rc), "=Q" (*(to)) \ + : [_size] "d" (size), [_from] "Q" (*(from)), \ + [spec] "d" (__oac_spec.val), [_to] "a" (to), \ + [_ksize] "K" (size) \ + : "cc", "0"); \ + __rc; \ +}) + +static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) +{ + int rc; + + switch (size) { + case 1: + rc = __get_user_asm((unsigned char *)x, + (unsigned char __user *)ptr, + size); + break; + case 2: + rc = __get_user_asm((unsigned short *)x, + (unsigned short __user *)ptr, + size); + break; + case 4: + rc = __get_user_asm((unsigned int *)x, + (unsigned int __user *)ptr, + size); + break; + case 8: + rc = __get_user_asm((unsigned long *)x, + (unsigned long __user *)ptr, + size); + break; + default: + __get_user_bad(); + break; + } + return rc; +} + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. 
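
Both asm helpers above funnel through MVCOS with the secondary address space selected, so a fault surfaces as a nonzero return code instead of an oops. Usage follows the generic kernel pattern; a minimal sketch, not part of the upstream diff (example_bump_user_counter() is hypothetical):

static int example_bump_user_counter(unsigned int __user *uptr)
{
	unsigned int val;

	if (get_user(val, uptr))	/* nonzero means -EFAULT */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* 0 on success, -EFAULT on fault */
}
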
+ */ +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __x = (x); \ + int __pu_err = -EFAULT; \ + \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \ + break; \ + default: \ + __put_user_bad(); \ + break; \ + } \ + __builtin_expect(__pu_err, 0); \ +}) + +#define put_user(x, ptr) \ +({ \ + might_fault(); \ + __put_user(x, ptr); \ +}) + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = -EFAULT; \ + \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ + unsigned char __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + case 8: { \ + unsigned long __x; \ + \ + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ + break; \ + }; \ + default: \ + __get_user_bad(); \ + break; \ + } \ + __builtin_expect(__gu_err, 0); \ +}) + +#define get_user(x, ptr) \ +({ \ + might_fault(); \ + __get_user(x, ptr); \ +}) + +/* + * Copy a null terminated string from userspace. + */ +long __must_check strncpy_from_user(char *dst, const char __user *src, long count); + +long __must_check strnlen_user(const char __user *src, long count); + +/* + * Zero Userspace + */ +unsigned long __must_check __clear_user(void __user *to, unsigned long size); + +static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +{ + might_fault(); + return __clear_user(to, n); +} + +void *s390_kernel_write(void *dst, const void *src, size_t size); + +int __noreturn __put_kernel_bad(void); + +#define __put_kernel_asm(val, to, insn) \ +({ \ + int __rc; \ + \ + asm volatile( \ + "0: " insn " %[_val],%[_to]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ + : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \ + : [_val] "d" (val) \ + : "cc"); \ + __rc; \ +}) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + unsigned long __x = (unsigned long)(*((type *)(src))); \ + int __pk_err; \ + \ + switch (sizeof(type)) { \ + case 1: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \ + break; \ + case 2: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \ + break; \ + case 4: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \ + break; \ + case 8: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \ + break; \ + default: \ + __pk_err = __put_kernel_bad(); \ + break; \ + } \ + if (unlikely(__pk_err)) \ + goto err_label; \ +} while (0) + +int __noreturn __get_kernel_bad(void); + +#define __get_kernel_asm(val, from, insn) \ +({ \ + int __rc; \ + \ + asm volatile( \ + "0: " insn " %[_val],%[_from]\n" \ + "1: xr %[rc],%[rc]\n" \ + "2:\n" \ + EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ + EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ + : [rc] "=d" (__rc), [_val] "=d" (val) \ + : [_from] "Q" (*(from)) \ + : "cc"); \ + __rc; \ +}) + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __gk_err; \ + \ + switch (sizeof(type)) { \ + case 1: { \ + unsigned char __x; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), 
"ic"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 8: { \ + unsigned long __x; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + default: \ + __gk_err = __get_kernel_bad(); \ + break; \ + } \ + if (unlikely(__gk_err)) \ + goto err_label; \ +} while (0) + +void __cmpxchg_user_key_called_with_bad_pointer(void); + +#define CMPXCHG_USER_KEY_MAX_LOOPS 128 + +static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, + __uint128_t old, __uint128_t new, + unsigned long key, int size) +{ + int rc = 0; + + switch (size) { + case 1: { + unsigned int prev, shift, mask, _old, _new; + unsigned long count; + + shift = (3 ^ (address & 3)) << 3; + address ^= address & 3; + _old = ((unsigned int)old & 0xff) << shift; + _new = ((unsigned int)new & 0xff) << shift; + mask = ~(0xff << shift); + asm volatile( + " spka 0(%[key])\n" + " sacf 256\n" + " llill %[count],%[max_loops]\n" + "0: l %[prev],%[address]\n" + "1: nr %[prev],%[mask]\n" + " xilf %[mask],0xffffffff\n" + " or %[new],%[prev]\n" + " or %[prev],%[tmp]\n" + "2: lr %[tmp],%[prev]\n" + "3: cs %[prev],%[new],%[address]\n" + "4: jnl 5f\n" + " xr %[tmp],%[prev]\n" + " xr %[new],%[tmp]\n" + " nr %[tmp],%[mask]\n" + " jnz 5f\n" + " brct %[count],2b\n" + "5: sacf 768\n" + " spka %[default_key]\n" + EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) + : [rc] "+&d" (rc), + [prev] "=&d" (prev), + [address] "+Q" (*(int *)address), + [tmp] "+&d" (_old), + [new] "+&d" (_new), + [mask] "+&d" (mask), + [count] "=a" (count) + : [key] "%[count]" (key << 4), + [default_key] "J" (PAGE_DEFAULT_KEY), + [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) + : "memory", "cc"); + *(unsigned char *)uval = prev >> shift; + if (!count) + rc = -EAGAIN; + return rc; + } + case 2: { + unsigned int prev, shift, mask, _old, _new; + unsigned long count; + + shift = (2 ^ (address & 2)) << 3; + address ^= address & 2; + _old = ((unsigned int)old & 0xffff) << shift; + _new = ((unsigned int)new & 0xffff) << shift; + mask = ~(0xffff << shift); + asm volatile( + " spka 0(%[key])\n" + " sacf 256\n" + " llill %[count],%[max_loops]\n" + "0: l %[prev],%[address]\n" + "1: nr %[prev],%[mask]\n" + " xilf %[mask],0xffffffff\n" + " or %[new],%[prev]\n" + " or %[prev],%[tmp]\n" + "2: lr %[tmp],%[prev]\n" + "3: cs %[prev],%[new],%[address]\n" + "4: jnl 5f\n" + " xr %[tmp],%[prev]\n" + " xr %[new],%[tmp]\n" + " nr %[tmp],%[mask]\n" + " jnz 5f\n" + " brct %[count],2b\n" + "5: sacf 768\n" + " spka %[default_key]\n" + EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) + : [rc] "+&d" (rc), + [prev] "=&d" (prev), + [address] "+Q" (*(int *)address), + [tmp] "+&d" (_old), + [new] "+&d" (_new), + [mask] "+&d" (mask), + [count] "=a" (count) + : [key] "%[count]" (key << 4), + [default_key] "J" (PAGE_DEFAULT_KEY), + [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) + : "memory", "cc"); + *(unsigned short *)uval = prev >> shift; + if (!count) + rc = 
-EAGAIN; + return rc; + } + case 4: { + unsigned int prev = old; + + asm volatile( + " spka 0(%[key])\n" + " sacf 256\n" + "0: cs %[prev],%[new],%[address]\n" + "1: sacf 768\n" + " spka %[default_key]\n" + EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) + : [rc] "+&d" (rc), + [prev] "+&d" (prev), + [address] "+Q" (*(int *)address) + : [new] "d" ((unsigned int)new), + [key] "a" (key << 4), + [default_key] "J" (PAGE_DEFAULT_KEY) + : "memory", "cc"); + *(unsigned int *)uval = prev; + return rc; + } + case 8: { + unsigned long prev = old; + + asm volatile( + " spka 0(%[key])\n" + " sacf 256\n" + "0: csg %[prev],%[new],%[address]\n" + "1: sacf 768\n" + " spka %[default_key]\n" + EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) + : [rc] "+&d" (rc), + [prev] "+&d" (prev), + [address] "+QS" (*(long *)address) + : [new] "d" ((unsigned long)new), + [key] "a" (key << 4), + [default_key] "J" (PAGE_DEFAULT_KEY) + : "memory", "cc"); + *(unsigned long *)uval = prev; + return rc; + } + case 16: { + __uint128_t prev = old; + + asm volatile( + " spka 0(%[key])\n" + " sacf 256\n" + "0: cdsg %[prev],%[new],%[address]\n" + "1: sacf 768\n" + " spka %[default_key]\n" + EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev]) + EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev]) + : [rc] "+&d" (rc), + [prev] "+&d" (prev), + [address] "+QS" (*(__int128_t *)address) + : [new] "d" (new), + [key] "a" (key << 4), + [default_key] "J" (PAGE_DEFAULT_KEY) + : "memory", "cc"); + *(__uint128_t *)uval = prev; + return rc; + } + } + __cmpxchg_user_key_called_with_bad_pointer(); + return rc; +} + +/** + * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys + * @ptr: User space address of value to compare to @old and exchange with + * @new. Must be aligned to sizeof(*@ptr). + * @uval: Address where the old value of *@ptr is written to. + * @old: Old value. Compared to the content pointed to by @ptr in order to + * determine if the exchange occurs. The old value read from *@ptr is + * written to *@uval. + * @new: New value to place at *@ptr. + * @key: Access key to use for checking storage key protection. + * + * Perform a cmpxchg on a user space target, honoring storage key protection. + * @key alone determines how key checking is performed, neither + * storage-protection-override nor fetch-protection-override apply. + * The caller must compare *@uval and @old to determine if values have been + * exchanged. In case of an exception *@uval is set to zero. 
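
A sketch of the calling convention spelled out in the kernel-doc above, not part of the upstream diff (example_user_cmpxchg() is hypothetical; returning -EBUSY when the value changed under us is this example's choice, not part of the interface):

static int example_user_cmpxchg(u32 __user *uptr, u32 old, u32 new,
				unsigned char key)
{
	u32 uval;
	int rc;

	rc = cmpxchg_user_key(uptr, &uval, old, new, key);
	if (rc)
		return rc;	/* -EFAULT, or -EAGAIN for 1/2 byte widths */
	return uval == old ? 0 : -EBUSY;	/* caller-side success check */
}
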
+ * + * Return: 0: cmpxchg executed + * -EFAULT: an exception happened when trying to access *@ptr + * -EAGAIN: maxed out number of retries (byte and short only) + */ +#define cmpxchg_user_key(ptr, uval, old, new, key) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(uval) __uval = (uval); \ + \ + BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \ + might_fault(); \ + __chk_user_ptr(__ptr); \ + __cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \ + (old), (new), (key), sizeof(*(__ptr))); \ +}) + +#endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h new file mode 100644 index 000000000..4260bc5ce --- /dev/null +++ b/arch/s390/include/asm/unistd.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * + * Derived from "include/asm-i386/unistd.h" + */ +#ifndef _ASM_S390_UNISTD_H_ +#define _ASM_S390_UNISTD_H_ + +#include +#include + +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_IPC +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLD_MMAP +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +# ifdef CONFIG_COMPAT +# define __ARCH_WANT_COMPAT_STAT +# define __ARCH_WANT_SYS_TIME32 +# define __ARCH_WANT_SYS_UTIME32 +# endif +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#endif /* _ASM_S390_UNISTD_H_ */ diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h new file mode 100644 index 000000000..b8ecf04e3 --- /dev/null +++ b/arch/s390/include/asm/unwind.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_UNWIND_H +#define _ASM_S390_UNWIND_H + +#include +#include +#include +#include +#include +#include + +/* + * To use the stack unwinder it has to be initialized with unwind_start. + * There four combinations for task and regs: + * 1) task==NULL, regs==NULL: the unwind starts for the task that is currently + * running, sp/ip picked up from the CPU registers + * 2) task==NULL, regs!=NULL: the unwind starts from the sp/ip found in + * the struct pt_regs of an interrupt frame for the current task + * 3) task!=NULL, regs==NULL: the unwind starts for an inactive task with + * the sp picked up from task->thread.ksp and the ip picked up from the + * return address stored by __switch_to + * 4) task!=NULL, regs!=NULL: the sp/ip are picked up from the interrupt + * frame 'regs' of a inactive task + * If 'first_frame' is not zero unwind_start skips unwind frames until it + * reaches the specified stack pointer. + * The end of the unwinding is indicated with unwind_done, this can be true + * right after unwind_start, e.g. with first_frame!=0 that can not be found. + * unwind_next_frame skips to the next frame. + * Once the unwind is completed unwind_error() can be used to check if there + * has been a situation where the unwinder could not correctly understand + * the tasks call chain. 
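
A sketch of the usual consumption of this unwinder API under the task/regs rules above, not part of the upstream diff (example_dump_callchain() is hypothetical; unwind_for_each_frame() and the predicates are defined below):

static void example_dump_callchain(struct task_struct *task)
{
	struct unwind_state state;

	/* task == NULL unwinds 'current'; first_frame == 0 starts at the top */
	unwind_for_each_frame(&state, task, NULL, 0)
		printk("%pSR\n", (void *)unwind_get_return_address(&state));
	if (unwind_error(&state))
		printk("callchain may be incomplete\n");
}
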
+ */ + +struct unwind_state { + struct stack_info stack_info; + unsigned long stack_mask; + struct task_struct *task; + struct pt_regs *regs; + unsigned long sp, ip; + int graph_idx; + struct llist_node *kr_cur; + bool reliable; + bool error; +}; + +/* Recover the return address modified by rethook and ftrace_graph. */ +static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long ip) +{ + ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *)state->sp); +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(ip)) + ip = rethook_find_ret_addr(state->task, state->sp, &state->kr_cur); +#endif + return ip; +} + +void __unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs, unsigned long first_frame); +bool unwind_next_frame(struct unwind_state *state); +unsigned long unwind_get_return_address(struct unwind_state *state); + +static inline bool unwind_done(struct unwind_state *state) +{ + return state->stack_info.type == STACK_TYPE_UNKNOWN; +} + +static inline bool unwind_error(struct unwind_state *state) +{ + return state->error; +} + +static __always_inline void unwind_start(struct unwind_state *state, + struct task_struct *task, + struct pt_regs *regs, + unsigned long first_frame) +{ + task = task ?: current; + first_frame = first_frame ?: get_stack_pointer(task, regs); + __unwind_start(state, task, regs, first_frame); +} + +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +{ + return unwind_done(state) ? NULL : state->regs; +} + +#define unwind_for_each_frame(state, task, regs, first_frame) \ + for (unwind_start(state, task, regs, first_frame); \ + !unwind_done(state); \ + unwind_next_frame(state)) + +static inline void unwind_init(void) {} +static inline void unwind_module_init(struct module *mod, void *orc_ip, + size_t orc_ip_size, void *orc, + size_t orc_size) {} + +#endif /* _ASM_S390_UNWIND_H */ diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h new file mode 100644 index 000000000..b60b3c7ef --- /dev/null +++ b/arch/s390/include/asm/uprobes.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * User-space Probes (UProbes) for s390 + * + * Copyright IBM Corp. 2014 + * Author(s): Jan Willeke, + */ + +#ifndef _ASM_UPROBES_H +#define _ASM_UPROBES_H + +#include + +typedef u16 uprobe_opcode_t; + +#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */ + +#define UPROBE_SWBP_INSN 0x0002 +#define UPROBE_SWBP_INSN_SIZE 2 + +struct arch_uprobe { + union{ + uprobe_opcode_t insn[3]; + uprobe_opcode_t ixol[3]; + }; + unsigned int saved_per : 1; + unsigned int saved_int_code; +}; + +struct arch_uprobe_task { +}; + +#endif /* _ASM_UPROBES_H */ diff --git a/arch/s390/include/asm/user.h b/arch/s390/include/asm/user.h new file mode 100644 index 000000000..8e8aaf485 --- /dev/null +++ b/arch/s390/include/asm/user.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * S390 version + * + * Derived from "include/asm-i386/usr.h" + */ + +#ifndef _S390_USER_H +#define _S390_USER_H + +#include +#include +/* Core file format: The core file is written in such a way that gdb + can understand it and provide useful information to the user (under + linux we use the 'trad-core' bfd). There are quite a number of + obstacles to being able to view the contents of the floating point + registers, and until these are solved you will not be able to view the + contents of them. 
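
The "integral number of pages" rounding mentioned for the DATA and STACK areas amounts to aligning the end up and the start down; a one-line sketch, not part of the upstream diff, assuming the standard PAGE_* macros (example_dump_pages() is hypothetical):

static unsigned long example_dump_pages(unsigned long start, unsigned long end)
{
	return (PAGE_ALIGN(end) - (start & PAGE_MASK)) >> PAGE_SHIFT;
}
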
Actually, you can read in the core file and look at + the contents of the user struct to find out what the floating point + registers contain. + The actual file contents are as follows: + UPAGE: 1 page consisting of a user struct that tells gdb what is present + in the file. Directly after this is a copy of the task_struct, which + is currently not used by gdb, but it may come in useful at some point. + All of the registers are stored as part of the upage. The upage should + always be only one page. + DATA: The data area is stored. We use current->end_text to + current->brk to pick up all of the user variables, plus any memory + that may have been malloced. No attempt is made to determine if a page + is demand-zero or if a page is totally unused, we just cover the entire + range. All of the addresses are rounded in such a way that an integral + number of pages is written. + STACK: We need the stack information in order to get a meaningful + backtrace. We need to write the data from (esp) to + current->start_stack, so we round each of these off in order to be able + to write an integer number of pages. + The minimum core file size is 3 pages, or 12288 bytes. +*/ + + +/* + * This is the old layout of "struct pt_regs", and + * is still the layout used by user mode (the new + * pt_regs doesn't have all registers as the kernel + * doesn't use the extra segment registers) + */ + +/* When the kernel dumps core, it starts by dumping the user struct - + this will be used by gdb to figure out where the data and stack segments + are within the file, and what virtual addresses to use. */ +struct user { +/* We start with the registers, to mimic the way that "memory" is returned + from the ptrace(3,...) function. */ + struct user_regs_struct regs; /* Where the registers are actually stored */ +/* The rest of this junk is to help gdb figure out what goes where */ + unsigned long int u_tsize; /* Text segment size (pages). */ + unsigned long int u_dsize; /* Data segment size (pages). */ + unsigned long int u_ssize; /* Stack segment size (pages). */ + unsigned long start_code; /* Starting virtual address of text. */ + unsigned long start_stack; /* Starting virtual address of stack area. + This is actually the bottom of the stack, + the top of the stack is always found in the + esp register. */ + long int signal; /* Signal that caused the core dump. */ + unsigned long u_ar0; /* Used by gdb to help find the values for */ + /* the registers. */ + unsigned long magic; /* To uniquely identify a core file */ + char u_comm[32]; /* User command that was responsible */ +}; + +#endif /* _S390_USER_H */ diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h new file mode 100644 index 000000000..0e7bd3873 --- /dev/null +++ b/arch/s390/include/asm/uv.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Ultravisor Interfaces + * + * Copyright IBM Corp. 
2019, 2022 + * + * Author(s): + * Vasily Gorbik + * Janosch Frank + */ +#ifndef _ASM_S390_UV_H +#define _ASM_S390_UV_H + +#include +#include +#include +#include +#include +#include + +#define UVC_CC_OK 0 +#define UVC_CC_ERROR 1 +#define UVC_CC_BUSY 2 +#define UVC_CC_PARTIAL 3 + +#define UVC_RC_EXECUTED 0x0001 +#define UVC_RC_INV_CMD 0x0002 +#define UVC_RC_INV_STATE 0x0003 +#define UVC_RC_INV_LEN 0x0005 +#define UVC_RC_NO_RESUME 0x0007 +#define UVC_RC_NEED_DESTROY 0x8000 + +#define UVC_CMD_QUI 0x0001 +#define UVC_CMD_INIT_UV 0x000f +#define UVC_CMD_CREATE_SEC_CONF 0x0100 +#define UVC_CMD_DESTROY_SEC_CONF 0x0101 +#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102 +#define UVC_CMD_CREATE_SEC_CPU 0x0120 +#define UVC_CMD_DESTROY_SEC_CPU 0x0121 +#define UVC_CMD_CONV_TO_SEC_STOR 0x0200 +#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201 +#define UVC_CMD_DESTR_SEC_STOR 0x0202 +#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300 +#define UVC_CMD_UNPACK_IMG 0x0301 +#define UVC_CMD_VERIFY_IMG 0x0302 +#define UVC_CMD_CPU_RESET 0x0310 +#define UVC_CMD_CPU_RESET_INITIAL 0x0311 +#define UVC_CMD_PREPARE_RESET 0x0320 +#define UVC_CMD_CPU_RESET_CLEAR 0x0321 +#define UVC_CMD_CPU_SET_STATE 0x0330 +#define UVC_CMD_SET_UNSHARE_ALL 0x0340 +#define UVC_CMD_PIN_PAGE_SHARED 0x0341 +#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342 +#define UVC_CMD_DUMP_INIT 0x0400 +#define UVC_CMD_DUMP_CONF_STOR_STATE 0x0401 +#define UVC_CMD_DUMP_CPU 0x0402 +#define UVC_CMD_DUMP_COMPLETE 0x0403 +#define UVC_CMD_SET_SHARED_ACCESS 0x1000 +#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001 +#define UVC_CMD_RETR_ATTEST 0x1020 +#define UVC_CMD_ADD_SECRET 0x1031 +#define UVC_CMD_LIST_SECRETS 0x1033 +#define UVC_CMD_LOCK_SECRETS 0x1034 + +/* Bits in installed uv calls */ +enum uv_cmds_inst { + BIT_UVC_CMD_QUI = 0, + BIT_UVC_CMD_INIT_UV = 1, + BIT_UVC_CMD_CREATE_SEC_CONF = 2, + BIT_UVC_CMD_DESTROY_SEC_CONF = 3, + BIT_UVC_CMD_CREATE_SEC_CPU = 4, + BIT_UVC_CMD_DESTROY_SEC_CPU = 5, + BIT_UVC_CMD_CONV_TO_SEC_STOR = 6, + BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7, + BIT_UVC_CMD_SET_SHARED_ACCESS = 8, + BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9, + BIT_UVC_CMD_SET_SEC_PARMS = 11, + BIT_UVC_CMD_UNPACK_IMG = 13, + BIT_UVC_CMD_VERIFY_IMG = 14, + BIT_UVC_CMD_CPU_RESET = 15, + BIT_UVC_CMD_CPU_RESET_INITIAL = 16, + BIT_UVC_CMD_CPU_SET_STATE = 17, + BIT_UVC_CMD_PREPARE_RESET = 18, + BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19, + BIT_UVC_CMD_UNSHARE_ALL = 20, + BIT_UVC_CMD_PIN_PAGE_SHARED = 21, + BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22, + BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23, + BIT_UVC_CMD_DUMP_INIT = 24, + BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25, + BIT_UVC_CMD_DUMP_CPU = 26, + BIT_UVC_CMD_DUMP_COMPLETE = 27, + BIT_UVC_CMD_RETR_ATTEST = 28, + BIT_UVC_CMD_ADD_SECRET = 29, + BIT_UVC_CMD_LIST_SECRETS = 30, + BIT_UVC_CMD_LOCK_SECRETS = 31, +}; + +enum uv_feat_ind { + BIT_UV_FEAT_MISC = 0, + BIT_UV_FEAT_AIV = 1, + BIT_UV_FEAT_AP = 4, + BIT_UV_FEAT_AP_INTR = 5, +}; + +struct uv_cb_header { + u16 len; + u16 cmd; /* Command Code */ + u16 rc; /* Response Code */ + u16 rrc; /* Return Reason Code */ +} __packed __aligned(8); + +/* Query Ultravisor Information */ +struct uv_cb_qui { + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08; /* 0x0008 */ + u64 inst_calls_list[4]; /* 0x0010 */ + u64 reserved30[2]; /* 0x0030 */ + u64 uv_base_stor_len; /* 0x0040 */ + u64 reserved48; /* 0x0048 */ + u64 conf_base_phys_stor_len; /* 0x0050 */ + u64 conf_base_virt_stor_len; /* 0x0058 */ + u64 conf_virt_var_stor_len; /* 0x0060 */ + u64 cpu_stor_len; /* 0x0068 */ + u32 reserved70[3]; /* 0x0070 */ + u32 max_num_sec_conf; /* 0x007c */ 
+ u64 max_guest_stor_addr; /* 0x0080 */ + u8 reserved88[0x9e - 0x88]; /* 0x0088 */ + u16 max_guest_cpu_id; /* 0x009e */ + u64 uv_feature_indications; /* 0x00a0 */ + u64 reserveda8; /* 0x00a8 */ + u64 supp_se_hdr_versions; /* 0x00b0 */ + u64 supp_se_hdr_pcf; /* 0x00b8 */ + u64 reservedc0; /* 0x00c0 */ + u64 conf_dump_storage_state_len; /* 0x00c8 */ + u64 conf_dump_finalize_len; /* 0x00d0 */ + u64 reservedd8; /* 0x00d8 */ + u64 supp_att_req_hdr_ver; /* 0x00e0 */ + u64 supp_att_pflags; /* 0x00e8 */ + u64 reservedf0; /* 0x00f0 */ + u64 supp_add_secret_req_ver; /* 0x00f8 */ + u64 supp_add_secret_pcf; /* 0x0100 */ + u64 supp_secret_types; /* 0x0180 */ + u16 max_secrets; /* 0x0110 */ + u8 reserved112[0x120 - 0x112]; /* 0x0112 */ +} __packed __aligned(8); + +/* Initialize Ultravisor */ +struct uv_cb_init { + struct uv_cb_header header; + u64 reserved08[2]; + u64 stor_origin; + u64 stor_len; + u64 reserved28[4]; +} __packed __aligned(8); + +/* Create Guest Configuration */ +struct uv_cb_cgc { + struct uv_cb_header header; + u64 reserved08[2]; + u64 guest_handle; + u64 conf_base_stor_origin; + u64 conf_virt_stor_origin; + u8 reserved30[6]; + union { + struct { + u16 : 14; + u16 ap_instr_intr : 1; + u16 ap_allow_instr : 1; + }; + u16 raw; + } flags; + u64 guest_stor_origin; + u64 guest_stor_len; + u64 guest_sca; + u64 guest_asce; + u64 reserved58[5]; +} __packed __aligned(8); + +/* Create Secure CPU */ +struct uv_cb_csc { + struct uv_cb_header header; + u64 reserved08[2]; + u64 cpu_handle; + u64 guest_handle; + u64 stor_origin; + u8 reserved30[6]; + u16 num; + u64 state_origin; + u64 reserved40[4]; +} __packed __aligned(8); + +/* Convert to Secure */ +struct uv_cb_cts { + struct uv_cb_header header; + u64 reserved08[2]; + u64 guest_handle; + u64 gaddr; +} __packed __aligned(8); + +/* Convert from Secure / Pin Page Shared */ +struct uv_cb_cfs { + struct uv_cb_header header; + u64 reserved08[2]; + u64 paddr; +} __packed __aligned(8); + +/* Set Secure Config Parameter */ +struct uv_cb_ssc { + struct uv_cb_header header; + u64 reserved08[2]; + u64 guest_handle; + u64 sec_header_origin; + u32 sec_header_len; + u32 reserved2c; + u64 reserved30[4]; +} __packed __aligned(8); + +/* Unpack */ +struct uv_cb_unp { + struct uv_cb_header header; + u64 reserved08[2]; + u64 guest_handle; + u64 gaddr; + u64 tweak[2]; + u64 reserved38[3]; +} __packed __aligned(8); + +#define PV_CPU_STATE_OPR 1 +#define PV_CPU_STATE_STP 2 +#define PV_CPU_STATE_CHKSTP 3 +#define PV_CPU_STATE_OPR_LOAD 5 + +struct uv_cb_cpu_set_state { + struct uv_cb_header header; + u64 reserved08[2]; + u64 cpu_handle; + u8 reserved20[7]; + u8 state; + u64 reserved28[5]; +}; + +/* + * A common UV call struct for calls that take no payload + * Examples: + * Destroy cpu/config + * Verify + */ +struct uv_cb_nodata { + struct uv_cb_header header; + u64 reserved08[2]; + u64 handle; + u64 reserved20[4]; +} __packed __aligned(8); + +/* Destroy Configuration Fast */ +struct uv_cb_destroy_fast { + struct uv_cb_header header; + u64 reserved08[2]; + u64 handle; + u64 reserved20[5]; +} __packed __aligned(8); + +/* Set Shared Access */ +struct uv_cb_share { + struct uv_cb_header header; + u64 reserved08[3]; + u64 paddr; + u64 reserved28; +} __packed __aligned(8); + +/* Retrieve Attestation Measurement */ +struct uv_cb_attest { + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08[2]; /* 0x0008 */ + u64 arcb_addr; /* 0x0018 */ + u64 cont_token; /* 0x0020 */ + u8 reserved28[6]; /* 0x0028 */ + u16 user_data_len; /* 0x002e */ + u8 user_data[256]; /* 0x0030 */ + u32 
reserved130[3]; /* 0x0130 */ + u32 meas_len; /* 0x013c */ + u64 meas_addr; /* 0x0140 */ + u8 config_uid[16]; /* 0x0148 */ + u32 reserved158; /* 0x0158 */ + u32 add_data_len; /* 0x015c */ + u64 add_data_addr; /* 0x0160 */ + u64 reserved168[4]; /* 0x0168 */ +} __packed __aligned(8); + +struct uv_cb_dump_cpu { + struct uv_cb_header header; + u64 reserved08[2]; + u64 cpu_handle; + u64 dump_area_origin; + u64 reserved28[5]; +} __packed __aligned(8); + +struct uv_cb_dump_stor_state { + struct uv_cb_header header; + u64 reserved08[2]; + u64 config_handle; + u64 dump_area_origin; + u64 gaddr; + u64 reserved28[4]; +} __packed __aligned(8); + +struct uv_cb_dump_complete { + struct uv_cb_header header; + u64 reserved08[2]; + u64 config_handle; + u64 dump_area_origin; + u64 reserved30[5]; +} __packed __aligned(8); + +/* + * A common UV call struct for pv guests that contains a single address + * Examples: + * Add Secret + * List Secrets + */ +struct uv_cb_guest_addr { + struct uv_cb_header header; + u64 reserved08[3]; + u64 addr; + u64 reserved28[4]; +} __packed __aligned(8); + +static inline int __uv_call(unsigned long r1, unsigned long r2) +{ + int cc; + + asm volatile( + " .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc) + : [r1] "a" (r1), [r2] "a" (r2) + : "memory", "cc"); + return cc; +} + +static inline int uv_call(unsigned long r1, unsigned long r2) +{ + int cc; + + do { + cc = __uv_call(r1, r2); + } while (cc > 1); + return cc; +} + +/* Low level uv_call that avoids stalls for long running busy conditions */ +static inline int uv_call_sched(unsigned long r1, unsigned long r2) +{ + int cc; + + do { + cc = __uv_call(r1, r2); + cond_resched(); + } while (cc > 1); + return cc; +} + +/* + * special variant of uv_call that only transports the cpu or guest + * handle and the command, like destroy or verify. + */ +static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc) +{ + struct uv_cb_nodata uvcb = { + .header.cmd = cmd, + .header.len = sizeof(uvcb), + .handle = handle, + }; + int cc; + + WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd); + cc = uv_call_sched(0, (u64)&uvcb); + *rc = uvcb.header.rc; + *rrc = uvcb.header.rrc; + return cc ? 
-EINVAL : 0; +} + +struct uv_info { + unsigned long inst_calls_list[4]; + unsigned long uv_base_stor_len; + unsigned long guest_base_stor_len; + unsigned long guest_virt_base_stor_len; + unsigned long guest_virt_var_stor_len; + unsigned long guest_cpu_stor_len; + unsigned long max_sec_stor_addr; + unsigned int max_num_sec_conf; + unsigned short max_guest_cpu_id; + unsigned long uv_feature_indications; + unsigned long supp_se_hdr_ver; + unsigned long supp_se_hdr_pcf; + unsigned long conf_dump_storage_state_len; + unsigned long conf_dump_finalize_len; + unsigned long supp_att_req_hdr_ver; + unsigned long supp_att_pflags; + unsigned long supp_add_secret_req_ver; + unsigned long supp_add_secret_pcf; + unsigned long supp_secret_types; + unsigned short max_secrets; +}; + +extern struct uv_info uv_info; + +static inline bool uv_has_feature(u8 feature_bit) +{ + if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8) + return false; + return test_bit_inv(feature_bit, &uv_info.uv_feature_indications); +} + +#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST +extern int prot_virt_guest; + +static inline int is_prot_virt_guest(void) +{ + return prot_virt_guest; +} + +static inline int share(unsigned long addr, u16 cmd) +{ + struct uv_cb_share uvcb = { + .header.cmd = cmd, + .header.len = sizeof(uvcb), + .paddr = addr + }; + + if (!is_prot_virt_guest()) + return -EOPNOTSUPP; + /* + * Sharing is page wise, if we encounter addresses that are + * not page aligned, we assume something went wrong. If + * malloced structs are passed to this function, we could leak + * data to the hypervisor. + */ + BUG_ON(addr & ~PAGE_MASK); + + if (!uv_call(0, (u64)&uvcb)) + return 0; + return -EINVAL; +} + +/* + * Guest 2 request to the Ultravisor to make a page shared with the + * hypervisor for IO. + * + * @addr: Real or absolute address of the page to be shared + */ +static inline int uv_set_shared(unsigned long addr) +{ + return share(addr, UVC_CMD_SET_SHARED_ACCESS); +} + +/* + * Guest 2 request to the Ultravisor to make a page unshared. 
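
A sketch pairing the share/unshare calls for an I/O buffer, not part of the upstream diff (example_share_io_page()/example_unshare_io_page() are hypothetical; page_to_phys() is a standard helper and guarantees the page alignment that share() insists on):

static int example_share_io_page(struct page *page)
{
	return uv_set_shared(page_to_phys(page));	/* 0, -EINVAL or -EOPNOTSUPP */
}

static void example_unshare_io_page(struct page *page)
{
	uv_remove_shared(page_to_phys(page));	/* must precede freeing the page */
}
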
+ * + * @addr: Real or absolute address of the page to be unshared + */ +static inline int uv_remove_shared(unsigned long addr) +{ + return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); +} + +#else +#define is_prot_virt_guest() 0 +static inline int uv_set_shared(unsigned long addr) { return 0; } +static inline int uv_remove_shared(unsigned long addr) { return 0; } +#endif + +#if IS_ENABLED(CONFIG_KVM) +extern int prot_virt_host; + +static inline int is_prot_virt_host(void) +{ + return prot_virt_host; +} + +int uv_pin_shared(unsigned long paddr); +int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); +int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr); +int uv_destroy_owned_page(unsigned long paddr); +int uv_convert_from_secure(unsigned long paddr); +int uv_convert_owned_from_secure(unsigned long paddr); +int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); + +void setup_uv(void); +#else +#define is_prot_virt_host() 0 +static inline void setup_uv(void) {} + +static inline int uv_pin_shared(unsigned long paddr) +{ + return 0; +} + +static inline int uv_destroy_owned_page(unsigned long paddr) +{ + return 0; +} + +static inline int uv_convert_from_secure(unsigned long paddr) +{ + return 0; +} + +static inline int uv_convert_owned_from_secure(unsigned long paddr) +{ + return 0; +} +#endif + +#endif /* _ASM_S390_UV_H */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h new file mode 100644 index 000000000..53165aa78 --- /dev/null +++ b/arch/s390/include/asm/vdso.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_VDSO_H__ +#define __S390_VDSO_H__ + +#include + +#ifndef __ASSEMBLY__ + +#include +#ifdef CONFIG_COMPAT +#include +#endif + +#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) +#ifdef CONFIG_COMPAT +#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) +#else +#define VDSO32_SYMBOL(tsk, name) (-1UL) +#endif + +extern struct vdso_data *vdso_data; + +int vdso_getcpu_init(void); + +#endif /* __ASSEMBLY__ */ + +/* Default link address for the vDSO */ +#define VDSO_LBASE 0 + +#define __VVAR_PAGES 2 + +#define VDSO_VERSION_STRING LINUX_2.6.29 + +#endif /* __S390_VDSO_H__ */ diff --git a/arch/s390/include/asm/vdso/clocksource.h b/arch/s390/include/asm/vdso/clocksource.h new file mode 100644 index 000000000..a93eda0ce --- /dev/null +++ b/arch/s390/include/asm/vdso/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_CLOCKSOURCE_H +#define __ASM_VDSO_CLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_TOD + +#endif /* __ASM_VDSO_CLOCKSOURCE_H */ diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h new file mode 100644 index 000000000..73ee89142 --- /dev/null +++ b/arch/s390/include/asm/vdso/data.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_ASM_VDSO_DATA_H +#define __S390_ASM_VDSO_DATA_H + +#include +#include + +struct arch_vdso_data { + __s64 tod_steering_delta; + __u64 tod_steering_end; +}; + +#endif /* __S390_ASM_VDSO_DATA_H */ diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000..db84942eb --- /dev/null +++ b/arch/s390/include/asm/vdso/gettimeofday.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_VDSO_GETTIMEOFDAY_H +#define ASM_VDSO_GETTIMEOFDAY_H + +#define VDSO_HAS_TIME 1 + +#define VDSO_HAS_CLOCK_GETRES 1 + +#include 
+#include +#include +#include + +#define vdso_calc_delta __arch_vdso_calc_delta +static __always_inline u64 __arch_vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +{ + return (cycles - last) * mult; +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) +{ + u64 adj, now; + + now = get_tod_clock(); + adj = vd->arch_data.tod_steering_end - now; + if (unlikely((s64) adj > 0)) + now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); + return now; +} + +static __always_inline +long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime, (long)clkid, (long)ts); +} + +static __always_inline +long gettimeofday_fallback(register struct __kernel_old_timeval *tv, + register struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +static __always_inline +long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_getres, (long)clkid, (long)ts); +} + +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) +{ + return _timens_data; +} +#endif + +#endif diff --git a/arch/s390/include/asm/vdso/processor.h b/arch/s390/include/asm/vdso/processor.h new file mode 100644 index 000000000..cfcc3e117 --- /dev/null +++ b/arch/s390/include/asm/vdso/processor.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#define cpu_relax() barrier() + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000..6c67c08ce --- /dev/null +++ b/arch/s390/include/asm/vdso/vsyscall.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. + */ + +static __always_inline struct vdso_data *__s390_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __s390_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/s390/include/asm/vmalloc.h b/arch/s390/include/asm/vmalloc.h new file mode 100644 index 000000000..3ba3a6bdc --- /dev/null +++ b/arch/s390/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_S390_VMALLOC_H +#define _ASM_S390_VMALLOC_H + +#endif /* _ASM_S390_VMALLOC_H */ diff --git a/arch/s390/include/asm/vmlinux.lds.h b/arch/s390/include/asm/vmlinux.lds.h new file mode 100644 index 000000000..cbe670a68 --- /dev/null +++ b/arch/s390/include/asm/vmlinux.lds.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +/* + * .boot.data section is shared between the decompressor code and the + * decompressed kernel. The decompressor will store values in it, and copy + * over to the decompressed image before starting it. + * + * .boot.data variables are kept in separate .boot.data. sections, + * which are sorted by alignment first, then by name before being merged + * into single .boot.data section. This way big holes cased by page aligned + * structs are avoided and linker produces consistent result. 
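
For context, a sketch of how a variable ends up in one of the sorted per-symbol sections that the BOOT_DATA macro below collects; this is not part of the upstream diff and assumes the __bootdata/__bootdata_preserved helpers from asm/sections.h (example_boot_value and example_preserved_value are hypothetical):

/* placed in .boot.data.example_boot_value, merged by BOOT_DATA */
unsigned long __bootdata(example_boot_value);

/* kept after decompression, collected by BOOT_DATA_PRESERVED */
unsigned long __bootdata_preserved(example_preserved_value);
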
+ */ +#define BOOT_DATA \ + . = ALIGN(PAGE_SIZE); \ + .boot.data : { \ + __boot_data_start = .; \ + *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \ + __boot_data_end = .; \ + } + +/* + * .boot.preserved.data is similar to .boot.data, but it is not part of the + * .init section and thus will be preserved for later use in the decompressed + * kernel. + */ +#define BOOT_DATA_PRESERVED \ + . = ALIGN(PAGE_SIZE); \ + .boot.preserved.data : { \ + __boot_data_preserved_start = .; \ + *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.preserved.data*))) \ + __boot_data_preserved_end = .; \ + } diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h new file mode 100644 index 000000000..fe17e448c --- /dev/null +++ b/arch/s390/include/asm/vtime.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S390_VTIME_H +#define _S390_VTIME_H + +#define __ARCH_HAS_VTIME_TASK_SWITCH + +static inline void update_timer_sys(void) +{ + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer; + S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; +} + +static inline void update_timer_mcck(void) +{ + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer; + S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer; +} + +#endif /* _S390_VTIME_H */ diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h new file mode 100644 index 000000000..e601adaa6 --- /dev/null +++ b/arch/s390/include/asm/vtimer.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2003, 2012 + * Virtual CPU timer + * + * Author(s): Jan Glauber + */ + +#ifndef _ASM_S390_TIMER_H +#define _ASM_S390_TIMER_H + +#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL) + +struct vtimer_list { + struct list_head entry; + u64 expires; + u64 interval; + void (*function)(unsigned long); + unsigned long data; +}; + +extern void init_virt_timer(struct vtimer_list *timer); +extern void add_virt_timer(struct vtimer_list *timer); +extern void add_virt_timer_periodic(struct vtimer_list *timer); +extern int mod_virt_timer(struct vtimer_list *timer, u64 expires); +extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires); +extern int del_virt_timer(struct vtimer_list *timer); +extern void vtime_init(void); + +#endif /* _ASM_S390_TIMER_H */ diff --git a/arch/s390/include/asm/vx-insn-asm.h b/arch/s390/include/asm/vx-insn-asm.h new file mode 100644 index 000000000..360f8b36d --- /dev/null +++ b/arch/s390/include/asm/vx-insn-asm.h @@ -0,0 +1,681 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Support for Vector Instructions + * + * Assembler macros to generate .byte/.word code for particular + * vector instructions that are supported by recent binutils (>= 2.26) only. + * + * Copyright IBM Corp. 
2015 + * Author(s): Hendrik Brueckner + */ + +#ifndef __ASM_S390_VX_INSN_INTERNAL_H +#define __ASM_S390_VX_INSN_INTERNAL_H + +#ifndef __ASM_S390_VX_INSN_H +#error only can be included directly +#endif + +#ifdef __ASSEMBLY__ + +/* Macros to generate vector instruction byte code */ + +/* GR_NUM - Retrieve general-purpose register number + * + * @opd: Operand to store register number + * @r64: String designation register in the format "%rN" + */ +.macro GR_NUM opd gr + \opd = 255 + .ifc \gr,%r0 + \opd = 0 + .endif + .ifc \gr,%r1 + \opd = 1 + .endif + .ifc \gr,%r2 + \opd = 2 + .endif + .ifc \gr,%r3 + \opd = 3 + .endif + .ifc \gr,%r4 + \opd = 4 + .endif + .ifc \gr,%r5 + \opd = 5 + .endif + .ifc \gr,%r6 + \opd = 6 + .endif + .ifc \gr,%r7 + \opd = 7 + .endif + .ifc \gr,%r8 + \opd = 8 + .endif + .ifc \gr,%r9 + \opd = 9 + .endif + .ifc \gr,%r10 + \opd = 10 + .endif + .ifc \gr,%r11 + \opd = 11 + .endif + .ifc \gr,%r12 + \opd = 12 + .endif + .ifc \gr,%r13 + \opd = 13 + .endif + .ifc \gr,%r14 + \opd = 14 + .endif + .ifc \gr,%r15 + \opd = 15 + .endif + .if \opd == 255 + \opd = \gr + .endif +.endm + +/* VX_NUM - Retrieve vector register number + * + * @opd: Operand to store register number + * @vxr: String designation register in the format "%vN" + * + * The vector register number is used for as input number to the + * instruction and, as well as, to compute the RXB field of the + * instruction. + */ +.macro VX_NUM opd vxr + \opd = 255 + .ifc \vxr,%v0 + \opd = 0 + .endif + .ifc \vxr,%v1 + \opd = 1 + .endif + .ifc \vxr,%v2 + \opd = 2 + .endif + .ifc \vxr,%v3 + \opd = 3 + .endif + .ifc \vxr,%v4 + \opd = 4 + .endif + .ifc \vxr,%v5 + \opd = 5 + .endif + .ifc \vxr,%v6 + \opd = 6 + .endif + .ifc \vxr,%v7 + \opd = 7 + .endif + .ifc \vxr,%v8 + \opd = 8 + .endif + .ifc \vxr,%v9 + \opd = 9 + .endif + .ifc \vxr,%v10 + \opd = 10 + .endif + .ifc \vxr,%v11 + \opd = 11 + .endif + .ifc \vxr,%v12 + \opd = 12 + .endif + .ifc \vxr,%v13 + \opd = 13 + .endif + .ifc \vxr,%v14 + \opd = 14 + .endif + .ifc \vxr,%v15 + \opd = 15 + .endif + .ifc \vxr,%v16 + \opd = 16 + .endif + .ifc \vxr,%v17 + \opd = 17 + .endif + .ifc \vxr,%v18 + \opd = 18 + .endif + .ifc \vxr,%v19 + \opd = 19 + .endif + .ifc \vxr,%v20 + \opd = 20 + .endif + .ifc \vxr,%v21 + \opd = 21 + .endif + .ifc \vxr,%v22 + \opd = 22 + .endif + .ifc \vxr,%v23 + \opd = 23 + .endif + .ifc \vxr,%v24 + \opd = 24 + .endif + .ifc \vxr,%v25 + \opd = 25 + .endif + .ifc \vxr,%v26 + \opd = 26 + .endif + .ifc \vxr,%v27 + \opd = 27 + .endif + .ifc \vxr,%v28 + \opd = 28 + .endif + .ifc \vxr,%v29 + \opd = 29 + .endif + .ifc \vxr,%v30 + \opd = 30 + .endif + .ifc \vxr,%v31 + \opd = 31 + .endif + .if \opd == 255 + \opd = \vxr + .endif +.endm + +/* RXB - Compute most significant bit used vector registers + * + * @rxb: Operand to store computed RXB value + * @v1: First vector register designated operand + * @v2: Second vector register designated operand + * @v3: Third vector register designated operand + * @v4: Fourth vector register designated operand + */ +.macro RXB rxb v1 v2=0 v3=0 v4=0 + \rxb = 0 + .if \v1 & 0x10 + \rxb = \rxb | 0x08 + .endif + .if \v2 & 0x10 + \rxb = \rxb | 0x04 + .endif + .if \v3 & 0x10 + \rxb = \rxb | 0x02 + .endif + .if \v4 & 0x10 + \rxb = \rxb | 0x01 + .endif +.endm + +/* MRXB - Generate Element Size Control and RXB value + * + * @m: Element size control + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register 
designated operand (for RXB) + */ +.macro MRXB m v1 v2=0 v3=0 v4=0 + rxb = 0 + RXB rxb, \v1, \v2, \v3, \v4 + .byte (\m << 4) | rxb +.endm + +/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields + * + * @m: Element size control + * @opc: Opcode + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0 + MRXB \m, \v1, \v2, \v3, \v4 + .byte \opc +.endm + +/* Vector support instructions */ + +/* VECTOR GENERATE BYTE MASK */ +.macro VGBM vr imm2 + VX_NUM v1, \vr + .word (0xE700 | ((v1&15) << 4)) + .word \imm2 + MRXBOPC 0, 0x44, v1 +.endm +.macro VZERO vxr + VGBM \vxr, 0 +.endm +.macro VONE vxr + VGBM \vxr, 0xFFFF +.endm + +/* VECTOR LOAD VR ELEMENT FROM GR */ +.macro VLVG v, gr, disp, m + VX_NUM v1, \v + GR_NUM b2, "%r0" + GR_NUM r3, \gr + .word 0xE700 | ((v1&15) << 4) | r3 + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x22, v1 +.endm +.macro VLVGB v, gr, index, base + VLVG \v, \gr, \index, \base, 0 +.endm +.macro VLVGH v, gr, index + VLVG \v, \gr, \index, 1 +.endm +.macro VLVGF v, gr, index + VLVG \v, \gr, \index, 2 +.endm +.macro VLVGG v, gr, index + VLVG \v, \gr, \index, 3 +.endm + +/* VECTOR LOAD REGISTER */ +.macro VLR v1, v2 + VX_NUM v1, \v1 + VX_NUM v2, \v2 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word 0 + MRXBOPC 0, 0x56, v1, v2 +.endm + +/* VECTOR LOAD */ +.macro VL v, disp, index="%r0", base + VX_NUM v1, \v + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x06, v1 +.endm + +/* VECTOR LOAD ELEMENT */ +.macro VLEx vr1, disp, index="%r0", base, m3, opc + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEB vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x00 +.endm +.macro VLEH vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x01 +.endm +.macro VLEF vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x03 +.endm +.macro VLEG vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x02 +.endm + +/* VECTOR LOAD ELEMENT IMMEDIATE */ +.macro VLEIx vr1, imm2, m3, opc + VX_NUM v1, \vr1 + .word 0xE700 | ((v1&15) << 4) + .word \imm2 + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEIB vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x40 +.endm +.macro VLEIH vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x41 +.endm +.macro VLEIF vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x43 +.endm +.macro VLEIG vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x42 +.endm + +/* VECTOR LOAD GR FROM VR ELEMENT */ +.macro VLGV gr, vr, disp, base="%r0", m + GR_NUM r1, \gr + GR_NUM b2, \base + VX_NUM v3, \vr + .word 0xE700 | (r1 << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x21, v3 +.endm +.macro VLGVB gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 0 +.endm +.macro VLGVH gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 1 +.endm +.macro VLGVF gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 2 +.endm +.macro VLGVG gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 3 +.endm + +/* VECTOR LOAD MULTIPLE */ +.macro VLM vfrom, vto, disp, base, hint=3 + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + 
MRXBOPC \hint, 0x36, v1, v3 +.endm + +/* VECTOR STORE */ +.macro VST vr1, disp, index="%r0", base + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (x2&15) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x0E, v1 +.endm + +/* VECTOR STORE MULTIPLE */ +.macro VSTM vfrom, vto, disp, base, hint=3 + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \hint, 0x3E, v1, v3 +.endm + +/* VECTOR PERMUTE */ +.macro VPERM vr1, vr2, vr3, vr4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4 +.endm + +/* VECTOR UNPACK LOGICAL LOW */ +.macro VUPLL vr1, vr2, m3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word 0x0000 + MRXBOPC \m3, 0xD4, v1, v2 +.endm +.macro VUPLLB vr1, vr2 + VUPLL \vr1, \vr2, 0 +.endm +.macro VUPLLH vr1, vr2 + VUPLL \vr1, \vr2, 1 +.endm +.macro VUPLLF vr1, vr2 + VUPLL \vr1, \vr2, 2 +.endm + +/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ +.macro VPDI vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x84, v1, v2, v3 +.endm + +/* VECTOR REPLICATE */ +.macro VREP vr1, vr3, imm2, m4 + VX_NUM v1, \vr1 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word \imm2 + MRXBOPC \m4, 0x4D, v1, v3 +.endm +.macro VREPB vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 0 +.endm +.macro VREPH vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 1 +.endm +.macro VREPF vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 2 +.endm +.macro VREPG vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 3 +.endm + +/* VECTOR MERGE HIGH */ +.macro VMRH vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x61, v1, v2, v3 +.endm +.macro VMRHB vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 0 +.endm +.macro VMRHH vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 1 +.endm +.macro VMRHF vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 2 +.endm +.macro VMRHG vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR MERGE LOW */ +.macro VMRL vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x60, v1, v2, v3 +.endm +.macro VMRLB vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 0 +.endm +.macro VMRLH vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 1 +.endm +.macro VMRLF vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 2 +.endm +.macro VMRLG vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 3 +.endm + + +/* Vector integer instructions */ + +/* VECTOR AND */ +.macro VN vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC 0, 0x68, v1, v2, v3 +.endm + +/* VECTOR EXCLUSIVE OR */ +.macro VX vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC 0, 0x6D, v1, v2, v3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM */ +.macro VGFM vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0xB4, v1, v2, v3 +.endm +.macro VGFMB vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 0 +.endm +.macro VGFMH vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 1 +.endm +.macro VGFMF vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 2 
+.endm
+.macro VGFMG vr1, vr2, vr3
+ VGFM \vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
+.macro VGFMA vr1, vr2, vr3, vr4, m5
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ VX_NUM v4, \vr4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\m5 << 8)
+ MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
+.endm
+.macro VGFMAB vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 0
+.endm
+.macro VGFMAH vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 1
+.endm
+.macro VGFMAF vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 2
+.endm
+.macro VGFMAG vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 3
+.endm
+
+/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
+.macro VSRLB vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x7D, v1, v2, v3
+.endm
+
+/* VECTOR REPLICATE IMMEDIATE */
+.macro VREPI vr1, imm2, m3
+ VX_NUM v1, \vr1
+ .word 0xE700 | ((v1&15) << 4)
+ .word \imm2
+ MRXBOPC \m3, 0x45, v1
+.endm
+.macro VREPIB vr1, imm2
+ VREPI \vr1, \imm2, 0
+.endm
+.macro VREPIH vr1, imm2
+ VREPI \vr1, \imm2, 1
+.endm
+.macro VREPIF vr1, imm2
+ VREPI \vr1, \imm2, 2
+.endm
+.macro VREPIG vr1, imm2
+ VREPI \vr1, \imm2, 3
+.endm
+
+/* VECTOR ADD */
+.macro VA vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0xF3, v1, v2, v3
+.endm
+.macro VAB vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 0
+.endm
+.macro VAH vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 1
+.endm
+.macro VAF vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 2
+.endm
+.macro VAG vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 3
+.endm
+.macro VAQ vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 4
+.endm
+
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+.macro VESRAV vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x7A, v1, v2, v3
+.endm
+
+.macro VESRAVB vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 0
+.endm
+.macro VESRAVH vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 1
+.endm
+.macro VESRAVF vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 2
+.endm
+.macro VESRAVG vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
+.macro VERLL vr1, vr3, disp, base="%r0", m4
+ VX_NUM v1, \vr1
+ VX_NUM v3, \vr3
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m4, 0x33, v1, v3
+.endm
+.macro VERLLB vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 0
+.endm
+.macro VERLLH vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 1
+.endm
+.macro VERLLF vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 2
+.endm
+.macro VERLLG vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 3
+.endm
+
+/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
+.macro VSLDB vr1, vr2, vr3, imm4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\imm4)
+ MRXBOPC 0, 0x77, v1, v2, v3
+.endm
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_VX_INSN_INTERNAL_H */
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
new file mode 100644
index 000000000..8c188f1c6
--- /dev/null
+++ b/arch/s390/include/asm/vx-insn.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Vector Instructions
+ *
+ * This wrapper header file allows the vector instruction macros to be used
+ * in both assembler 
files as well as in inline assemblies in C files.
+ */
+
+#ifndef __ASM_S390_VX_INSN_H
+#define __ASM_S390_VX_INSN_H
+
+#include <asm/vx-insn-asm.h>
+
+#ifndef __ASSEMBLY__
+
+asm(".include \"asm/vx-insn-asm.h\"\n");
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_VX_INSN_H */
diff --git a/arch/s390/include/asm/xor.h b/arch/s390/include/asm/xor.h
new file mode 100644
index 000000000..857d6759b
--- /dev/null
+++ b/arch/s390/include/asm/xor.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Optimized xor routines
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky
+ */
+#ifndef _ASM_S390_XOR_H
+#define _ASM_S390_XOR_H
+
+extern struct xor_block_template xor_block_xc;
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_xc); \
+} while (0)
+
+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_xc)
+
+#endif /* _ASM_S390_XOR_H */
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..46c1ff0b8
--- /dev/null
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+generated-y += unistd_32.h
+generated-y += unistd_64.h
diff --git a/arch/s390/include/uapi/asm/auxvec.h b/arch/s390/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..a056c4637
--- /dev/null
+++ b/arch/s390/include/uapi/asm/auxvec.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASMS390_AUXVEC_H
+#define __ASMS390_AUXVEC_H
+
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
+#endif
diff --git a/arch/s390/include/uapi/asm/bitsperlong.h b/arch/s390/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..cceaf47b0
--- /dev/null
+++ b/arch/s390/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_S390_BITSPERLONG_H
+#define __ASM_S390_BITSPERLONG_H
+
+#ifndef __s390x__
+#define __BITS_PER_LONG 32
+#else
+#define __BITS_PER_LONG 64
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_S390_BITSPERLONG_H */
+
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000..3ed42ff6d
--- /dev/null
+++ b/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/s390/include/uapi/asm/byteorder.h b/arch/s390/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..1442b57dd
--- /dev/null
+++ b/arch/s390/include/uapi/asm/byteorder.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _S390_BYTEORDER_H
+#define _S390_BYTEORDER_H
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _S390_BYTEORDER_H */
diff --git a/arch/s390/include/uapi/asm/chpid.h b/arch/s390/include/uapi/asm/chpid.h
new file mode 100644
index 000000000..2ae2ed8c0
--- /dev/null
+++ b/arch/s390/include/uapi/asm/chpid.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright IBM Corp. 
2007, 2012 + * Author(s): Peter Oberparleiter + */ + +#ifndef _UAPI_ASM_S390_CHPID_H +#define _UAPI_ASM_S390_CHPID_H + +#include +#include + +#define __MAX_CHPID 255 + +struct chp_id { + __u8 reserved1; + __u8 cssid; + __u8 reserved2; + __u8 id; +} __attribute__((packed)); + + +#endif /* _UAPI_ASM_S390_CHPID_H */ diff --git a/arch/s390/include/uapi/asm/chsc.h b/arch/s390/include/uapi/asm/chsc.h new file mode 100644 index 000000000..83a574e95 --- /dev/null +++ b/arch/s390/include/uapi/asm/chsc.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * ioctl interface for /dev/chsc + * + * Copyright IBM Corp. 2008, 2012 + * Author(s): Cornelia Huck + */ + +#ifndef _ASM_CHSC_H +#define _ASM_CHSC_H + +#include +#include +#include +#include + +#define CHSC_SIZE 0x1000 + +struct chsc_async_header { + __u16 length; + __u16 code; + __u32 cmd_dependend; + __u32 key : 4; + __u32 : 28; + struct subchannel_id sid; +}; + +struct chsc_async_area { + struct chsc_async_header header; + __u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)]; +}; + +struct chsc_header { + __u16 length; + __u16 code; +}; + +struct chsc_sync_area { + struct chsc_header header; + __u8 data[CHSC_SIZE - sizeof(struct chsc_header)]; +}; + +struct chsc_response_struct { + __u16 length; + __u16 code; + __u32 parms; + __u8 data[CHSC_SIZE - 2 * sizeof(__u16) - sizeof(__u32)]; +}; + +struct chsc_chp_cd { + struct chp_id chpid; + int m; + int fmt; + struct chsc_response_struct cpcb; +}; + +struct chsc_cu_cd { + __u16 cun; + __u8 cssid; + int m; + int fmt; + struct chsc_response_struct cucb; +}; + +struct chsc_sch_cud { + struct subchannel_id schid; + int fmt; + struct chsc_response_struct scub; +}; + +struct conf_id { + int m; + __u8 cssid; + __u8 ssid; +}; + +struct chsc_conf_info { + struct conf_id id; + int fmt; + struct chsc_response_struct scid; +}; + +struct ccl_parm_chpid { + int m; + struct chp_id chp; +}; + +struct ccl_parm_cssids { + __u8 f_cssid; + __u8 l_cssid; +}; + +struct chsc_comp_list { + struct { + enum { + CCL_CU_ON_CHP = 1, + CCL_CHP_TYPE_CAP = 2, + CCL_CSS_IMG = 4, + CCL_CSS_IMG_CONF_CHAR = 5, + CCL_IOP_CHP = 6, + } ctype; + int fmt; + struct ccl_parm_chpid chpid; + struct ccl_parm_cssids cssids; + } req; + struct chsc_response_struct sccl; +}; + +struct chsc_dcal { + struct { + enum { + DCAL_CSS_IID_PN = 4, + } atype; + __u32 list_parm[2]; + int fmt; + } req; + struct chsc_response_struct sdcal; +}; + +struct chsc_cpd_info { + struct chp_id chpid; + int m; + int fmt; + int rfmt; + int c; + struct chsc_response_struct chpdb; +}; + +#define CHSC_IOCTL_MAGIC 'c' + +#define CHSC_START _IOWR(CHSC_IOCTL_MAGIC, 0x81, struct chsc_async_area) +#define CHSC_INFO_CHANNEL_PATH _IOWR(CHSC_IOCTL_MAGIC, 0x82, \ + struct chsc_chp_cd) +#define CHSC_INFO_CU _IOWR(CHSC_IOCTL_MAGIC, 0x83, struct chsc_cu_cd) +#define CHSC_INFO_SCH_CU _IOWR(CHSC_IOCTL_MAGIC, 0x84, struct chsc_sch_cud) +#define CHSC_INFO_CI _IOWR(CHSC_IOCTL_MAGIC, 0x85, struct chsc_conf_info) +#define CHSC_INFO_CCL _IOWR(CHSC_IOCTL_MAGIC, 0x86, struct chsc_comp_list) +#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) +#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) +#define CHSC_START_SYNC _IOWR(CHSC_IOCTL_MAGIC, 0x89, struct chsc_sync_area) +#define CHSC_ON_CLOSE_SET _IOWR(CHSC_IOCTL_MAGIC, 0x8a, struct chsc_async_area) +#define CHSC_ON_CLOSE_REMOVE _IO(CHSC_IOCTL_MAGIC, 0x8b) + +#endif diff --git a/arch/s390/include/uapi/asm/clp.h b/arch/s390/include/uapi/asm/clp.h new file mode 100644 
index 000000000..b36d9e9cd --- /dev/null +++ b/arch/s390/include/uapi/asm/clp.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * ioctl interface for /dev/clp + * + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky + */ + +#ifndef _ASM_CLP_H +#define _ASM_CLP_H + +#include +#include + +struct clp_req { + unsigned int c : 1; + unsigned int r : 1; + unsigned int lps : 6; + unsigned int cmd : 8; + unsigned int : 16; + unsigned int reserved; + __u64 data_p; +}; + +#define CLP_IOCTL_MAGIC 'c' + +#define CLP_SYNC _IOWR(CLP_IOCTL_MAGIC, 0xC1, struct clp_req) + +#endif diff --git a/arch/s390/include/uapi/asm/cmb.h b/arch/s390/include/uapi/asm/cmb.h new file mode 100644 index 000000000..115434ab9 --- /dev/null +++ b/arch/s390/include/uapi/asm/cmb.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPIS390_CMB_H +#define _UAPIS390_CMB_H + +#include + +/** + * struct cmbdata - channel measurement block data for user space + * @size: size of the stored data + * @elapsed_time: time since last sampling + * @ssch_rsch_count: number of ssch and rsch + * @sample_count: number of samples + * @device_connect_time: time of device connect + * @function_pending_time: time of function pending + * @device_disconnect_time: time of device disconnect + * @control_unit_queuing_time: time of control unit queuing + * @device_active_only_time: time of device active only + * @device_busy_time: time of device busy (ext. format) + * @initial_command_response_time: initial command response time (ext. format) + * + * All values are stored as 64 bit for simplicity, especially + * in 32 bit emulation mode. All time values are normalized to + * nanoseconds. + * Currently, two formats are known, which differ by the size of + * this structure, i.e. the last two members are only set when + * the extended channel measurement facility (first shipped in + * z990 machines) is activated. + * Potentially, more fields could be added, which would result in a + * new ioctl number. + */ +struct cmbdata { + __u64 size; + __u64 elapsed_time; + /* basic and extended format: */ + __u64 ssch_rsch_count; + __u64 sample_count; + __u64 device_connect_time; + __u64 function_pending_time; + __u64 device_disconnect_time; + __u64 control_unit_queuing_time; + __u64 device_active_only_time; + /* extended format only: */ + __u64 device_busy_time; + __u64 initial_command_response_time; +}; + +/* enable channel measurement */ +#define BIODASDCMFENABLE _IO(DASD_IOCTL_LETTER, 32) +/* enable channel measurement */ +#define BIODASDCMFDISABLE _IO(DASD_IOCTL_LETTER, 33) +/* read channel measurement data */ +#define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER, 33, struct cmbdata) + +#endif /* _UAPIS390_CMB_H */ diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h new file mode 100644 index 000000000..b11d98800 --- /dev/null +++ b/arch/s390/include/uapi/asm/dasd.h @@ -0,0 +1,354 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Author(s)......: Holger Smolinski + * Bugreports.to..: + * Copyright IBM Corp. 
1999, 2000
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop
+ *
+ * This file is the interface of the DASD device driver, which is exported
+ * to user space. Any future changes to the API will result in a change of
+ * the APIVERSION reported to user space by the DASDAPIVER ioctl.
+ *
+ */
+
+#ifndef DASD_H
+#define DASD_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define DASD_IOCTL_LETTER 'D'
+
+#define DASD_API_VERSION 6
+
+/*
+ * struct dasd_information2_t
+ * represents any data about the device which is visible to user space,
+ * including format and features.
+ */
+typedef struct dasd_information2_t {
+ unsigned int devno; /* S/390 devno */
+ unsigned int real_devno; /* for aliases */
+ unsigned int schid; /* S/390 subchannel identifier */
+ unsigned int cu_type : 16; /* from SenseID */
+ unsigned int cu_model : 8; /* from SenseID */
+ unsigned int dev_type : 16; /* from SenseID */
+ unsigned int dev_model : 8; /* from SenseID */
+ unsigned int open_count;
+ unsigned int req_queue_len;
+ unsigned int chanq_len; /* length of chanq */
+ char type[4]; /* from discipline.name, 'none' for unknown */
+ unsigned int status; /* current device level */
+ unsigned int label_block; /* where to find the VOLSER */
+ unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
+ unsigned int characteristics_size;
+ unsigned int confdata_size;
+ char characteristics[64]; /* from read_device_characteristics */
+ char configuration_data[256]; /* from read_configuration_data */
+ unsigned int format; /* format info like formatted/cdl/ldl/... */
+ unsigned int features; /* dasd features like 'ro',... */
+ unsigned int reserved0; /* reserved for further use ,... */
+ unsigned int reserved1; /* reserved for further use ,... */
+ unsigned int reserved2; /* reserved for further use ,... */
+ unsigned int reserved3; /* reserved for further use ,... */
+ unsigned int reserved4; /* reserved for further use ,... */
+ unsigned int reserved5; /* reserved for further use ,... */
+ unsigned int reserved6; /* reserved for further use ,... */
+ unsigned int reserved7; /* reserved for further use ,... */
+} dasd_information2_t;
+
+/*
+ * values to be used for dasd_information_t.format
+ * 0x00: NOT formatted
+ * 0x01: Linux disc layout
+ * 0x02: Common disc layout
+ */
+#define DASD_FORMAT_NONE 0
+#define DASD_FORMAT_LDL 1
+#define DASD_FORMAT_CDL 2
+/*
+ * values to be used for dasd_information_t.features
+ * 0x100: default features
+ * 0x001: readonly (ro)
+ * 0x002: use diag discipline (diag)
+ * 0x004: set the device initially online (internal use only)
+ * 0x008: enable ERP related logging
+ * 0x010: allow I/O to fail on lost paths
+ * 0x020: allow I/O to fail when a lock was stolen
+ * 0x040: give access to raw eckd data
+ * 0x080: enable discard support
+ * 0x100: enable autodisable for IFCC errors (default)
+ * 0x200: enable requeue of all requests on autoquiesce
+ */
+#define DASD_FEATURE_READONLY 0x001
+#define DASD_FEATURE_USEDIAG 0x002
+#define DASD_FEATURE_INITIAL_ONLINE 0x004
+#define DASD_FEATURE_ERPLOG 0x008
+#define DASD_FEATURE_FAILFAST 0x010
+#define DASD_FEATURE_FAILONSLCK 0x020
+#define DASD_FEATURE_USERAW 0x040
+#define DASD_FEATURE_DISCARD 0x080
+#define DASD_FEATURE_PATH_AUTODISABLE 0x100
+#define DASD_FEATURE_REQUEUEQUIESCE 0x200
+#define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE
+
+#define DASD_PARTN_BITS 2
+
+/*
+ * struct dasd_information_t
+ * represents any data about the device which is visible to user space
+ */
+typedef struct dasd_information_t {
+ unsigned int devno; /* S/390 devno */
+ unsigned int real_devno; /* for aliases */
+ unsigned int schid; /* S/390 subchannel identifier */
+ unsigned int cu_type : 16; /* from SenseID */
+ unsigned int cu_model : 8; /* from SenseID */
+ unsigned int dev_type : 16; /* from SenseID */
+ unsigned int dev_model : 8; /* from SenseID */
+ unsigned int open_count;
+ unsigned int req_queue_len;
+ unsigned int chanq_len; /* length of chanq */
+ char type[4]; /* from discipline.name, 'none' for unknown */
+ unsigned int status; /* current device level */
+ unsigned int label_block; /* where to find the VOLSER */
+ unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
+ unsigned int characteristics_size;
+ unsigned int confdata_size;
+ char characteristics[64]; /* from read_device_characteristics */
+ char configuration_data[256]; /* from read_configuration_data */
+} dasd_information_t;
+
+/*
+ * Read Subsystem Data - Performance Statistics
+ */
+typedef struct dasd_rssd_perf_stats_t {
+ unsigned char invalid:1;
+ unsigned char format:3;
+ unsigned char data_format:4;
+ unsigned char unit_address;
+ unsigned short device_status;
+ unsigned int nr_read_normal;
+ unsigned int nr_read_normal_hits;
+ unsigned int nr_write_normal;
+ unsigned int nr_write_fast_normal_hits;
+ unsigned int nr_read_seq;
+ unsigned int nr_read_seq_hits;
+ unsigned int nr_write_seq;
+ unsigned int nr_write_fast_seq_hits;
+ unsigned int nr_read_cache;
+ unsigned int nr_read_cache_hits;
+ unsigned int nr_write_cache;
+ unsigned int nr_write_fast_cache_hits;
+ unsigned int nr_inhibit_cache;
+ unsigned int nr_bybass_cache;
+ unsigned int nr_seq_dasd_to_cache;
+ unsigned int nr_dasd_to_cache;
+ unsigned int nr_cache_to_dasd;
+ unsigned int nr_delayed_fast_write;
+ unsigned int nr_normal_fast_write;
+ unsigned int nr_seq_fast_write;
+ unsigned int nr_cache_miss;
+ unsigned char status2;
+ unsigned int nr_quick_write_promotes;
+ unsigned char reserved;
+ unsigned short ssid;
+ unsigned char reseved2[96];
+} __attribute__((packed)) dasd_rssd_perf_stats_t;
+
+/*
+ * struct dasd_profile_info_t
+ * holds the profiling information
+ */
+typedef struct dasd_profile_info_t {
+ unsigned int dasd_io_reqs; /* total number of requests processed */
+ unsigned int dasd_io_sects; /* total number of sectors processed */
+ unsigned int dasd_io_secs[32]; /* histogram of request sizes */
+ unsigned int dasd_io_times[32]; /* histogram of request times */
+ unsigned int dasd_io_timps[32]; /* histogram of request times per sector */
+ unsigned int dasd_io_time1[32]; /* histogram of time from build to start */
+ unsigned int dasd_io_time2[32]; /* histogram of time from start to irq */
+ unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq, per sector */
+ unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */
+ unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */
+} dasd_profile_info_t;
+
+/*
+ * struct format_data_t
+ * represents all data necessary to format a dasd
+ */
+typedef struct format_data_t {
+ unsigned int start_unit; /* from track */
+ unsigned int stop_unit; /* to track */
+ unsigned int blksize; /* sectorsize */
+ unsigned int intensity;
+} format_data_t;
+
+/*
+ * struct dasd_copypair_swap_data_t
+ * represents all data necessary to issue a swap of the copy pair relation
+ */
+struct dasd_copypair_swap_data_t {
+ char primary[20]; /* BUSID of primary */
+ char secondary[20]; /* BUSID of secondary */
+
+ /* Reserved for future updates. */
+ __u8 reserved[64];
+};
+
+/*
+ * values to be used for format_data_t.intensity
+ * 0/8: normal format
+ * 1/9: also write record zero
+ * 3/11: also write home address
+ * 4/12: invalidate track
+ */
+#define DASD_FMT_INT_FMT_R0 1 /* write record zero */
+#define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */
+#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
+#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
+#define DASD_FMT_INT_FMT_NOR0 16 /* remove permission to write record zero */
+#define DASD_FMT_INT_ESE_FULL 32 /* release space for entire volume */
+
+/*
+ * struct format_check_t
+ * represents all data necessary to evaluate the format of
+ * different tracks of a dasd
+ */
+typedef struct format_check_t {
+ /* Input */
+ struct format_data_t expect;
+
+ /* Output */
+ unsigned int result; /* Error indication (DASD_FMT_ERR_*) */
+ unsigned int unit; /* Track that is in error */
+ unsigned int rec; /* Record that is in error */
+ unsigned int num_records; /* Records in the track in error */
+ unsigned int blksize; /* Blocksize of first record in error */
+ unsigned int key_length; /* Key length of first record in error */
+} format_check_t;
+
+/* Values returned in format_check_t when a format error is detected: */
+/* Too few records were found on a single track */
+#define DASD_FMT_ERR_TOO_FEW_RECORDS 1
+/* Too many records were found on a single track */
+#define DASD_FMT_ERR_TOO_MANY_RECORDS 2
+/* Blocksize/data-length of a record was wrong */
+#define DASD_FMT_ERR_BLKSIZE 3
+/* A record ID is defined by cylinder, head, and record number (CHR). */
+/* On mismatch, this error is set */
+#define DASD_FMT_ERR_RECORD_ID 4
+/* If key-length was != 0 */
+#define DASD_FMT_ERR_KEY_LENGTH 5
+
+/*
+ * struct attrib_data_t
+ * represents the operation (cache) bits for the device.
+ * Used in the DE (Define Extent) CCW to influence caching of the DASD.
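+ *
+ * For example, to ask for sequential access with prestaging of the next
+ * two cylinders (a sketch only: the device path, open flags and error
+ * handling are illustrative assumptions, not part of this interface):
+ *
+ *   attrib_data_t attrib = {
+ *           .operation = DASD_SEQ_PRESTAGE,
+ *           .nr_cyl = 2,
+ *   };
+ *   int fd = open("/dev/dasda", O_RDONLY);
+ *
+ *   if (fd >= 0 && ioctl(fd, BIODASDSATTR, &attrib) < 0)
+ *           perror("BIODASDSATTR");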
+ */
+typedef struct attrib_data_t {
+ unsigned char operation:3; /* cache operation mode */
+ unsigned char reserved:5; /* unused */
+ __u16 nr_cyl; /* number of cylinders for read ahead */
+ __u8 reserved2[29]; /* for future use */
+} __attribute__ ((packed)) attrib_data_t;
+
+/* definition of operation (cache) bits within attributes of DE */
+#define DASD_NORMAL_CACHE 0x0
+#define DASD_BYPASS_CACHE 0x1
+#define DASD_INHIBIT_LOAD 0x2
+#define DASD_SEQ_ACCESS 0x3
+#define DASD_SEQ_PRESTAGE 0x4
+#define DASD_REC_ACCESS 0x5
+
+/*
+ * Perform EMC Symmetrix I/O
+ */
+typedef struct dasd_symmio_parms {
+ unsigned char reserved[8]; /* compat with older releases */
+ unsigned long long psf_data; /* char * cast to u64 */
+ unsigned long long rssd_result; /* char * cast to u64 */
+ int psf_data_len;
+ int rssd_result_len;
+} __attribute__ ((packed)) dasd_symmio_parms_t;
+
+/*
+ * Data returned by Sense Path Group ID (SNID)
+ */
+struct dasd_snid_data {
+ struct {
+ __u8 group:2;
+ __u8 reserve:2;
+ __u8 mode:1;
+ __u8 res:3;
+ } __attribute__ ((packed)) path_state;
+ __u8 pgid[11];
+} __attribute__ ((packed));
+
+struct dasd_snid_ioctl_data {
+ struct dasd_snid_data data;
+ __u8 path_mask;
+} __attribute__ ((packed));
+
+
+/********************************************************************************
+ * SECTION: Definition of IOCTLs
+ *
+ * Here is how the ioctl numbers should be used:
+ * 0 - 31 DASD driver itself
+ * 32 - 239 still open
+ * 240 - 255 reserved for EMC
+ *******************************************************************************/
+
+/* Disable the volume (for Linux) */
+#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0)
+/* Enable the volume (for Linux) */
+#define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1)
+/* Issue a reserve, release, or steal-lock command, respectively.
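+ *
+ * For example, taking a reserve (a sketch; the device path and error
+ * handling are illustrative assumptions):
+ *
+ *   int fd = open("/dev/dasdb", O_RDONLY);
+ *
+ *   if (fd >= 0 && ioctl(fd, BIODASDRSRV) < 0)
+ *           perror("BIODASDRSRV");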
*/ +#define BIODASDRSRV _IO(DASD_IOCTL_LETTER,2) /* reserve */ +#define BIODASDRLSE _IO(DASD_IOCTL_LETTER,3) /* release */ +#define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */ +/* reset profiling information of a device */ +#define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5) +/* Quiesce IO on device */ +#define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) +/* Resume IO on device */ +#define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7) +/* Abort all I/O on a device */ +#define BIODASDABORTIO _IO(DASD_IOCTL_LETTER, 240) +/* Allow I/O on a device */ +#define BIODASDALLOWIO _IO(DASD_IOCTL_LETTER, 241) + + +/* retrieve API version number */ +#define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int) +/* Get information on a dasd device */ +#define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t) +/* retrieve profiling information of a device */ +#define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t) +/* Get information on a dasd device (enhanced) */ +#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t) +/* Performance Statistics Read */ +#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) +/* Get Attributes (cache operations) */ +#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) + + +/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ +#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) +/* Set Attributes (cache operations) */ +#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) +/* Release Allocated Space */ +#define BIODASDRAS _IOW(DASD_IOCTL_LETTER, 3, format_data_t) +/* Swap copy pair relation */ +#define BIODASDCOPYPAIRSWAP _IOW(DASD_IOCTL_LETTER, 4, struct dasd_copypair_swap_data_t) + +/* Get Sense Path Group ID (SNID) data */ +#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data) +/* Check device format according to format_check_t */ +#define BIODASDCHECKFMT _IOWR(DASD_IOCTL_LETTER, 2, format_check_t) + +#define BIODASDSYMMIO _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t) + +#endif /* DASD_H */ + diff --git a/arch/s390/include/uapi/asm/fs3270.h b/arch/s390/include/uapi/asm/fs3270.h new file mode 100644 index 000000000..c4bc1108a --- /dev/null +++ b/arch/s390/include/uapi/asm/fs3270.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_S390_UAPI_FS3270_H +#define __ASM_S390_UAPI_FS3270_H + +#include +#include + +/* ioctls for fullscreen 3270 */ +#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */ +#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */ +#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */ +#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. 
*/ +#define TUBGETMOD _IO('3', 13) /* get characteristics like model, cols, rows */ + +/* For TUBGETMOD */ +struct raw3270_iocb { + __u16 model; + __u16 line_cnt; + __u16 col_cnt; + __u16 pf_cnt; + __u16 re_cnt; + __u16 map; +}; + +#endif /* __ASM_S390_UAPI_FS3270_H */ diff --git a/arch/s390/include/uapi/asm/guarded_storage.h b/arch/s390/include/uapi/asm/guarded_storage.h new file mode 100644 index 000000000..666af1c33 --- /dev/null +++ b/arch/s390/include/uapi/asm/guarded_storage.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _GUARDED_STORAGE_H +#define _GUARDED_STORAGE_H + +#include + +struct gs_cb { + __u64 reserved; + __u64 gsd; + __u64 gssm; + __u64 gs_epl_a; +}; + +struct gs_epl { + __u8 pad1; + union { + __u8 gs_eam; + struct { + __u8 : 6; + __u8 e : 1; + __u8 b : 1; + }; + }; + union { + __u8 gs_eci; + struct { + __u8 tx : 1; + __u8 cx : 1; + __u8 : 5; + __u8 in : 1; + }; + }; + union { + __u8 gs_eai; + struct { + __u8 : 1; + __u8 t : 1; + __u8 as : 2; + __u8 ar : 4; + }; + }; + __u32 pad2; + __u64 gs_eha; + __u64 gs_eia; + __u64 gs_eoa; + __u64 gs_eir; + __u64 gs_era; +}; + +#define GS_ENABLE 0 +#define GS_DISABLE 1 +#define GS_SET_BC_CB 2 +#define GS_CLEAR_BC_CB 3 +#define GS_BROADCAST 4 + +static inline void load_gs_cb(struct gs_cb *gs_cb) +{ + asm volatile(".insn rxy,0xe3000000004d,0,%0" : : "Q" (*gs_cb)); +} + +static inline void store_gs_cb(struct gs_cb *gs_cb) +{ + asm volatile(".insn rxy,0xe30000000049,0,%0" : : "Q" (*gs_cb)); +} + +static inline void save_gs_cb(struct gs_cb *gs_cb) +{ + if (gs_cb) + store_gs_cb(gs_cb); +} + +static inline void restore_gs_cb(struct gs_cb *gs_cb) +{ + if (gs_cb) + load_gs_cb(gs_cb); +} + +#endif /* _GUARDED_STORAGE_H */ diff --git a/arch/s390/include/uapi/asm/hwctrset.h b/arch/s390/include/uapi/asm/hwctrset.h new file mode 100644 index 000000000..e56b9dd23 --- /dev/null +++ b/arch/s390/include/uapi/asm/hwctrset.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright IBM Corp. 2021 + * Interface implementation for communication with the CPU Measurement + * counter facility device driver. + * + * Author(s): Thomas Richter + * + * Define for ioctl() commands to communicate with the CPU Measurement + * counter facility device driver. 
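+ *
+ * A rough calling sequence, shown as a sketch (the cpumask value, the
+ * counter set selection and the error handling are illustrative
+ * assumptions; <fcntl.h> and <sys/ioctl.h> provide open() and ioctl()):
+ *
+ *   __u64 cpumask = 0x3;                    hypothetical: CPUs 0 and 1
+ *   struct s390_ctrset_start start = {
+ *           .version = S390_HWCTR_START_VERSION,
+ *           .cpumask_len = sizeof(cpumask),
+ *           .cpumask = &cpumask,
+ *           .counter_sets = 1,              hypothetical set selection
+ *   };
+ *   int fd = open("/dev/" S390_HWCTR_DEVICE, O_RDWR);
+ *
+ *   if (fd >= 0 && ioctl(fd, S390_HWCTR_START, &start) == 0) {
+ *           read the counter sets with S390_HWCTR_READ, using a
+ *           buffer of start.data_bytes bytes, then:
+ *           ioctl(fd, S390_HWCTR_STOP);
+ *   }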
+ */ + +#ifndef _PERF_CPUM_CF_DIAG_H +#define _PERF_CPUM_CF_DIAG_H + +#include +#include + +#define S390_HWCTR_DEVICE "hwctr" +#define S390_HWCTR_START_VERSION 1 + +struct s390_ctrset_start { /* Set CPUs to operate on */ + __u64 version; /* Version of interface */ + __u64 data_bytes; /* # of bytes required */ + __u64 cpumask_len; /* Length of CPU mask in bytes */ + __u64 *cpumask; /* Pointer to CPU mask */ + __u64 counter_sets; /* Bit mask of counter sets to get */ +}; + +struct s390_ctrset_setdata { /* Counter set data */ + __u32 set; /* Counter set number */ + __u32 no_cnts; /* # of counters stored in cv[] */ + __u64 cv[]; /* Counter values (variable length) */ +}; + +struct s390_ctrset_cpudata { /* Counter set data per CPU */ + __u32 cpu_nr; /* CPU number */ + __u32 no_sets; /* # of counters sets in data[] */ + struct s390_ctrset_setdata data[]; +}; + +struct s390_ctrset_read { /* Structure to get all ctr sets */ + __u64 no_cpus; /* Total # of CPUs data taken from */ + struct s390_ctrset_cpudata data[]; +}; + +#define S390_HWCTR_MAGIC 'C' /* Random magic # for ioctls */ +#define S390_HWCTR_START _IOWR(S390_HWCTR_MAGIC, 1, struct s390_ctrset_start) +#define S390_HWCTR_STOP _IO(S390_HWCTR_MAGIC, 2) +#define S390_HWCTR_READ _IOWR(S390_HWCTR_MAGIC, 3, struct s390_ctrset_read) +#endif diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h new file mode 100644 index 000000000..fe6174e14 --- /dev/null +++ b/arch/s390/include/uapi/asm/hypfs.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Structures for hypfs interface + * + * Copyright IBM Corp. 2013 + * + * Author: Martin Schwidefsky + */ + +#ifndef _ASM_HYPFS_H +#define _ASM_HYPFS_H + +#include + +/* + * IOCTL for binary interface /sys/kernel/debug/diag_304 + */ +struct hypfs_diag304 { + __u32 args[2]; + __u64 data; + __u64 rc; +} __attribute__((packed)); + +#define HYPFS_IOCTL_MAGIC 0x10 + +#define HYPFS_DIAG304 \ + _IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304) + +/* + * Structures for binary interface /sys/kernel/debug/diag_0c + */ +struct hypfs_diag0c_hdr { + __u64 len; /* Length of diag0c buffer without header */ + __u16 version; /* Version of header */ + char reserved1[6]; /* Reserved */ + char tod_ext[16]; /* TOD clock for diag0c */ + __u64 count; /* Number of entries (CPUs) in diag0c array */ + char reserved2[24]; /* Reserved */ +}; + +struct hypfs_diag0c_entry { + char date[8]; /* MM/DD/YY in EBCDIC */ + char time[8]; /* HH:MM:SS in EBCDIC */ + __u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */ + __u64 totalproc; /* Total of virtual and simulation time (us) */ + __u32 cpu; /* Linux logical CPU number */ + __u32 reserved; /* Align to 8 byte */ +}; + +struct hypfs_diag0c_data { + struct hypfs_diag0c_hdr hdr; /* 64 byte header */ + struct hypfs_diag0c_entry entry[]; /* diag0c entry array */ +}; + +#endif diff --git a/arch/s390/include/uapi/asm/ioctls.h b/arch/s390/include/uapi/asm/ioctls.h new file mode 100644 index 000000000..342a3284e --- /dev/null +++ b/arch/s390/include/uapi/asm/ioctls.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ARCH_S390_IOCTLS_H__ +#define __ARCH_S390_IOCTLS_H__ + +#define FIOQSIZE 0x545E + +#include + +#endif diff --git a/arch/s390/include/uapi/asm/ipcbuf.h b/arch/s390/include/uapi/asm/ipcbuf.h new file mode 100644 index 000000000..1030cd186 --- /dev/null +++ b/arch/s390/include/uapi/asm/ipcbuf.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note 
*/ +#ifndef __S390_IPCBUF_H__ +#define __S390_IPCBUF_H__ + +#include + +/* + * The user_ipc_perm structure for S/390 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; +#ifndef __s390x__ + unsigned short __pad2; +#endif /* ! __s390x__ */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __S390_IPCBUF_H__ */ diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h new file mode 100644 index 000000000..2cd28af50 --- /dev/null +++ b/arch/s390/include/uapi/asm/ipl.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_S390_UAPI_IPL_H +#define _ASM_S390_UAPI_IPL_H + +#include + +/* IPL Parameter List header */ +struct ipl_pl_hdr { + __u32 len; + __u8 flags; + __u8 reserved1[2]; + __u8 version; +} __packed; + +#define IPL_PL_FLAG_IPLPS 0x80 +#define IPL_PL_FLAG_SIPL 0x40 +#define IPL_PL_FLAG_IPLSR 0x20 + +/* IPL Parameter Block header */ +struct ipl_pb_hdr { + __u32 len; + __u8 pbt; +} __packed; + +/* IPL Parameter Block types */ +enum ipl_pbt { + IPL_PBT_FCP = 0, + IPL_PBT_SCP_DATA = 1, + IPL_PBT_CCW = 2, + IPL_PBT_ECKD = 3, + IPL_PBT_NVME = 4, +}; + +/* IPL Parameter Block 0 with common fields */ +struct ipl_pb0_common { + __u32 len; + __u8 pbt; + __u8 flags; + __u8 reserved1[2]; + __u8 loadparm[8]; + __u8 reserved2[84]; +} __packed; + +#define IPL_PB0_FLAG_LOADPARM 0x80 + +/* IPL Parameter Block 0 for FCP */ +struct ipl_pb0_fcp { + __u32 len; + __u8 pbt; + __u8 reserved1[3]; + __u8 loadparm[8]; + __u8 reserved2[304]; + __u8 opt; + __u8 reserved3[3]; + __u8 cssid; + __u8 reserved4[1]; + __u16 devno; + __u8 reserved5[4]; + __u64 wwpn; + __u64 lun; + __u32 bootprog; + __u8 reserved6[12]; + __u64 br_lba; + __u32 scp_data_len; + __u8 reserved7[260]; + __u8 scp_data[]; +} __packed; + +#define IPL_PB0_FCP_OPT_IPL 0x10 +#define IPL_PB0_FCP_OPT_DUMP 0x20 + +/* IPL Parameter Block 0 for NVMe */ +struct ipl_pb0_nvme { + __u32 len; + __u8 pbt; + __u8 reserved1[3]; + __u8 loadparm[8]; + __u8 reserved2[304]; + __u8 opt; + __u8 reserved3[3]; + __u32 fid; + __u8 reserved4[12]; + __u32 nsid; + __u8 reserved5[4]; + __u32 bootprog; + __u8 reserved6[12]; + __u64 br_lba; + __u32 scp_data_len; + __u8 reserved7[260]; + __u8 scp_data[]; +} __packed; + +#define IPL_PB0_NVME_OPT_IPL 0x10 +#define IPL_PB0_NVME_OPT_DUMP 0x20 + +/* IPL Parameter Block 0 for CCW */ +struct ipl_pb0_ccw { + __u32 len; + __u8 pbt; + __u8 flags; + __u8 reserved1[2]; + __u8 loadparm[8]; + __u8 reserved2[84]; + __u16 reserved3 : 13; + __u8 ssid : 3; + __u16 devno; + __u8 vm_flags; + __u8 reserved4[3]; + __u32 vm_parm_len; + __u8 nss_name[8]; + __u8 vm_parm[64]; + __u8 reserved5[8]; +} __packed; + +/* IPL Parameter Block 0 for ECKD */ +struct ipl_pb0_eckd { + __u32 len; + __u8 pbt; + __u8 reserved1[3]; + __u32 reserved2[78]; + __u8 opt; + __u8 reserved4[4]; + __u8 reserved5:5; + __u8 ssid:3; + __u16 devno; + __u32 reserved6[5]; + __u32 bootprog; + __u8 reserved7[12]; + struct { + __u16 cyl; + __u8 head; + __u8 record; + __u32 reserved; + } br_chr __packed; + __u32 scp_data_len; + __u8 reserved8[260]; + __u8 scp_data[]; +} __packed; + +#define IPL_PB0_ECKD_OPT_IPL 0x10 +#define 
IPL_PB0_ECKD_OPT_DUMP 0x20 + +#define IPL_PB0_CCW_VM_FLAG_NSS 0x80 +#define IPL_PB0_CCW_VM_FLAG_VP 0x40 + +/* IPL Parameter Block 1 for additional SCP data */ +struct ipl_pb1_scp_data { + __u32 len; + __u8 pbt; + __u8 scp_data[]; +} __packed; + +/* IPL Report List header */ +struct ipl_rl_hdr { + __u32 len; + __u8 flags; + __u8 reserved1[2]; + __u8 version; + __u8 reserved2[8]; +} __packed; + +/* IPL Report Block header */ +struct ipl_rb_hdr { + __u32 len; + __u8 rbt; + __u8 reserved1[11]; +} __packed; + +/* IPL Report Block types */ +enum ipl_rbt { + IPL_RBT_CERTIFICATES = 1, + IPL_RBT_COMPONENTS = 2, +}; + +/* IPL Report Block for the certificate list */ +struct ipl_rb_certificate_entry { + __u64 addr; + __u64 len; +} __packed; + +struct ipl_rb_certificates { + __u32 len; + __u8 rbt; + __u8 reserved1[11]; + struct ipl_rb_certificate_entry entries[]; +} __packed; + +/* IPL Report Block for the component list */ +struct ipl_rb_component_entry { + __u64 addr; + __u64 len; + __u8 flags; + __u8 reserved1[5]; + __u16 certificate_index; + __u8 reserved2[8]; +}; + +#define IPL_RB_COMPONENT_FLAG_SIGNED 0x80 +#define IPL_RB_COMPONENT_FLAG_VERIFIED 0x40 + +struct ipl_rb_components { + __u32 len; + __u8 rbt; + __u8 reserved1[11]; + struct ipl_rb_component_entry entries[]; +} __packed; + +#endif diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h new file mode 100644 index 000000000..abe926d43 --- /dev/null +++ b/arch/s390/include/uapi/asm/kvm.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __LINUX_KVM_S390_H +#define __LINUX_KVM_S390_H +/* + * KVM s390 specific structures and definitions + * + * Copyright IBM Corp. 2008, 2018 + * + * Author(s): Carsten Otte + * Christian Borntraeger + */ +#include + +#define __KVM_S390 +#define __KVM_HAVE_GUEST_DEBUG + +/* Device control API: s390-specific devices */ +#define KVM_DEV_FLIC_GET_ALL_IRQS 1 +#define KVM_DEV_FLIC_ENQUEUE 2 +#define KVM_DEV_FLIC_CLEAR_IRQS 3 +#define KVM_DEV_FLIC_APF_ENABLE 4 +#define KVM_DEV_FLIC_APF_DISABLE_WAIT 5 +#define KVM_DEV_FLIC_ADAPTER_REGISTER 6 +#define KVM_DEV_FLIC_ADAPTER_MODIFY 7 +#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 +#define KVM_DEV_FLIC_AISM 9 +#define KVM_DEV_FLIC_AIRQ_INJECT 10 +#define KVM_DEV_FLIC_AISM_ALL 11 +/* + * We can have up to 4*64k pending subchannels + 8 adapter interrupts, + * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. + * There are also sclp and machine checks. This gives us + * sizeof(kvm_s390_irq)*(4*65536+8+64*64+1+1) = 72 * 266250 = 19170000 + * Lets round up to 8192 pages. 
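+ *
+ * Spelled out, with the usual 4K page size assumed:
+ *
+ *   pending interrupts: 4 * 65536 + 8 + 64 * 64 + 1 + 1 = 266250
+ *   worst case buffer:  266250 * 72 bytes = 19170000 bytes
+ *   rounded up:         8192 * 4096 bytes = 33554432 bytes = 0x2000000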
+ */ +#define KVM_S390_MAX_FLOAT_IRQS 266250 +#define KVM_S390_FLIC_MAX_BUFFER 0x2000000 + +struct kvm_s390_io_adapter { + __u32 id; + __u8 isc; + __u8 maskable; + __u8 swap; + __u8 flags; +}; + +#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 + +struct kvm_s390_ais_req { + __u8 isc; + __u16 mode; +}; + +struct kvm_s390_ais_all { + __u8 simm; + __u8 nimm; +}; + +#define KVM_S390_IO_ADAPTER_MASK 1 +#define KVM_S390_IO_ADAPTER_MAP 2 +#define KVM_S390_IO_ADAPTER_UNMAP 3 + +struct kvm_s390_io_adapter_req { + __u32 id; + __u8 type; + __u8 mask; + __u16 pad0; + __u64 addr; +}; + +/* kvm attr_group on vm fd */ +#define KVM_S390_VM_MEM_CTRL 0 +#define KVM_S390_VM_TOD 1 +#define KVM_S390_VM_CRYPTO 2 +#define KVM_S390_VM_CPU_MODEL 3 +#define KVM_S390_VM_MIGRATION 4 +#define KVM_S390_VM_CPU_TOPOLOGY 5 + +/* kvm attributes for mem_ctrl */ +#define KVM_S390_VM_MEM_ENABLE_CMMA 0 +#define KVM_S390_VM_MEM_CLR_CMMA 1 +#define KVM_S390_VM_MEM_LIMIT_SIZE 2 + +#define KVM_S390_NO_MEM_LIMIT U64_MAX + +/* kvm attributes for KVM_S390_VM_TOD */ +#define KVM_S390_VM_TOD_LOW 0 +#define KVM_S390_VM_TOD_HIGH 1 +#define KVM_S390_VM_TOD_EXT 2 + +struct kvm_s390_vm_tod_clock { + __u8 epoch_idx; + __u64 tod; +}; + +/* kvm attributes for KVM_S390_VM_CPU_MODEL */ +/* processor related attributes are r/w */ +#define KVM_S390_VM_CPU_PROCESSOR 0 +struct kvm_s390_vm_cpu_processor { + __u64 cpuid; + __u16 ibc; + __u8 pad[6]; + __u64 fac_list[256]; +}; + +/* machine related attributes are r/o */ +#define KVM_S390_VM_CPU_MACHINE 1 +struct kvm_s390_vm_cpu_machine { + __u64 cpuid; + __u32 ibc; + __u8 pad[4]; + __u64 fac_mask[256]; + __u64 fac_list[256]; +}; + +#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2 +#define KVM_S390_VM_CPU_MACHINE_FEAT 3 + +#define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 +#define KVM_S390_VM_CPU_FEAT_ESOP 0 +#define KVM_S390_VM_CPU_FEAT_SIEF2 1 +#define KVM_S390_VM_CPU_FEAT_64BSCAO 2 +#define KVM_S390_VM_CPU_FEAT_SIIF 3 +#define KVM_S390_VM_CPU_FEAT_GPERE 4 +#define KVM_S390_VM_CPU_FEAT_GSLS 5 +#define KVM_S390_VM_CPU_FEAT_IB 6 +#define KVM_S390_VM_CPU_FEAT_CEI 7 +#define KVM_S390_VM_CPU_FEAT_IBS 8 +#define KVM_S390_VM_CPU_FEAT_SKEY 9 +#define KVM_S390_VM_CPU_FEAT_CMMA 10 +#define KVM_S390_VM_CPU_FEAT_PFMFI 11 +#define KVM_S390_VM_CPU_FEAT_SIGPIF 12 +#define KVM_S390_VM_CPU_FEAT_KSS 13 +struct kvm_s390_vm_cpu_feat { + __u64 feat[16]; +}; + +#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4 +#define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5 +/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */ +struct kvm_s390_vm_cpu_subfunc { + __u8 plo[32]; /* always */ + __u8 ptff[16]; /* with TOD-clock steering */ + __u8 kmac[16]; /* with MSA */ + __u8 kmc[16]; /* with MSA */ + __u8 km[16]; /* with MSA */ + __u8 kimd[16]; /* with MSA */ + __u8 klmd[16]; /* with MSA */ + __u8 pckmo[16]; /* with MSA3 */ + __u8 kmctr[16]; /* with MSA4 */ + __u8 kmf[16]; /* with MSA4 */ + __u8 kmo[16]; /* with MSA4 */ + __u8 pcc[16]; /* with MSA4 */ + __u8 ppno[16]; /* with MSA5 */ + __u8 kma[16]; /* with MSA8 */ + __u8 kdsa[16]; /* with MSA9 */ + __u8 sortl[32]; /* with STFLE.150 */ + __u8 dfltcc[32]; /* with STFLE.151 */ + __u8 reserved[1728]; +}; + +#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6 +#define KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST 7 + +#define KVM_S390_VM_CPU_UV_FEAT_NR_BITS 64 +struct kvm_s390_vm_cpu_uv_feat { + union { + struct { + __u64 : 4; + __u64 ap : 1; /* bit 4 */ + __u64 ap_intr : 1; /* bit 5 */ + __u64 : 58; + }; + __u64 feat; + }; +}; + +/* kvm attributes for crypto */ +#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 +#define 
KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 +#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2 +#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3 +#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4 +#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5 + +/* kvm attributes for migration mode */ +#define KVM_S390_VM_MIGRATION_STOP 0 +#define KVM_S390_VM_MIGRATION_START 1 +#define KVM_S390_VM_MIGRATION_STATUS 2 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + /* general purpose regs for s390 */ + __u64 gprs[16]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { + __u32 acrs[16]; + __u64 crs[16]; +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { + __u32 fpc; + __u64 fprs[16]; +}; + +#define KVM_GUESTDBG_USE_HW_BP 0x00010000 + +#define KVM_HW_BP 1 +#define KVM_HW_WP_WRITE 2 +#define KVM_SINGLESTEP 4 + +struct kvm_debug_exit_arch { + __u64 addr; + __u8 type; + __u8 pad[7]; /* Should be set to 0 */ +}; + +struct kvm_hw_breakpoint { + __u64 addr; + __u64 phys_addr; + __u64 len; + __u8 type; + __u8 pad[7]; /* Should be set to 0 */ +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { + __u32 nr_hw_bp; + __u32 pad; /* Should be set to 0 */ + struct kvm_hw_breakpoint __user *hw_bp; +}; + +/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */ +#define KVM_S390_PFAULT_TOKEN_INVALID 0xffffffffffffffffULL + +#define KVM_SYNC_PREFIX (1UL << 0) +#define KVM_SYNC_GPRS (1UL << 1) +#define KVM_SYNC_ACRS (1UL << 2) +#define KVM_SYNC_CRS (1UL << 3) +#define KVM_SYNC_ARCH0 (1UL << 4) +#define KVM_SYNC_PFAULT (1UL << 5) +#define KVM_SYNC_VRS (1UL << 6) +#define KVM_SYNC_RICCB (1UL << 7) +#define KVM_SYNC_FPRS (1UL << 8) +#define KVM_SYNC_GSCB (1UL << 9) +#define KVM_SYNC_BPBC (1UL << 10) +#define KVM_SYNC_ETOKEN (1UL << 11) +#define KVM_SYNC_DIAG318 (1UL << 12) + +#define KVM_SYNC_S390_VALID_FIELDS \ + (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \ + KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \ + KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN | \ + KVM_SYNC_DIAG318) + +/* length and alignment of the sdnx as a power of two */ +#define SDNXC 8 +#define SDNXL (1UL << SDNXC) +/* definition of registers in kvm_run */ +struct kvm_sync_regs { + __u64 prefix; /* prefix register */ + __u64 gprs[16]; /* general purpose registers */ + __u32 acrs[16]; /* access registers */ + __u64 crs[16]; /* control registers */ + __u64 todpr; /* tod programmable register [ARCH0] */ + __u64 cputm; /* cpu timer [ARCH0] */ + __u64 ckc; /* clock comparator [ARCH0] */ + __u64 pp; /* program parameter [ARCH0] */ + __u64 gbea; /* guest breaking-event address [ARCH0] */ + __u64 pft; /* pfault token [PFAULT] */ + __u64 pfs; /* pfault select [PFAULT] */ + __u64 pfc; /* pfault compare [PFAULT] */ + union { + __u64 vrs[32][2]; /* vector registers (KVM_SYNC_VRS) */ + __u64 fprs[16]; /* fp registers (KVM_SYNC_FPRS) */ + }; + __u8 reserved[512]; /* for future vector expansion */ + __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ + __u8 bpbc : 1; /* bp mode */ + __u8 reserved2 : 7; + __u8 padding1[51]; /* riccb needs to be 64byte aligned */ + __u8 riccb[64]; /* runtime instrumentation controls block */ + __u64 diag318; /* diagnose 0x318 info */ + __u8 padding2[184]; /* sdnx needs to be 256byte aligned */ + union { + __u8 sdnx[SDNXL]; /* state description annex */ + struct { + __u64 reserved1[2]; + __u64 gscb[4]; + __u64 etoken; + __u64 etoken_extension; + }; + }; +}; + +#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) +#define KVM_REG_S390_EPOCHDIFF 
(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2) +#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3) +#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4) +#define KVM_REG_S390_PFTOKEN (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5) +#define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6) +#define KVM_REG_S390_PFSELECT (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7) +#define KVM_REG_S390_PP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8) +#define KVM_REG_S390_GBEA (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9) +#endif diff --git a/arch/s390/include/uapi/asm/kvm_para.h b/arch/s390/include/uapi/asm/kvm_para.h new file mode 100644 index 000000000..b9ab584ad --- /dev/null +++ b/arch/s390/include/uapi/asm/kvm_para.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * User API definitions for paravirtual devices on s390 + * + * Copyright IBM Corp. 2008 + * + * Author(s): Christian Borntraeger + */ diff --git a/arch/s390/include/uapi/asm/kvm_perf.h b/arch/s390/include/uapi/asm/kvm_perf.h new file mode 100644 index 000000000..84606b8cc --- /dev/null +++ b/arch/s390/include/uapi/asm/kvm_perf.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Definitions for perf-kvm on s390 + * + * Copyright 2014 IBM Corp. + * Author(s): Alexander Yarygin + */ + +#ifndef __LINUX_KVM_PERF_S390_H +#define __LINUX_KVM_PERF_S390_H + +#include + +#define DECODE_STR_LEN 40 + +#define VCPU_ID "id" + +#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter" +#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit" +#define KVM_EXIT_REASON "icptcode" + +#endif diff --git a/arch/s390/include/uapi/asm/monwriter.h b/arch/s390/include/uapi/asm/monwriter.h new file mode 100644 index 000000000..03d172f31 --- /dev/null +++ b/arch/s390/include/uapi/asm/monwriter.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright IBM Corp. 
2006 + * Character device driver for writing z/VM APPLDATA monitor records + * Version 1.0 + * Author(s): Melissa Howland + * + */ + +#ifndef _ASM_390_MONWRITER_H +#define _ASM_390_MONWRITER_H + +/* mon_function values */ +#define MONWRITE_START_INTERVAL 0x00 /* start interval recording */ +#define MONWRITE_STOP_INTERVAL 0x01 /* stop interval or config recording */ +#define MONWRITE_GEN_EVENT 0x02 /* generate event record */ +#define MONWRITE_START_CONFIG 0x03 /* start configuration recording */ + +/* the header the app uses in its write() data */ +struct monwrite_hdr { + unsigned char mon_function; + unsigned short applid; + unsigned char record_num; + unsigned short version; + unsigned short release; + unsigned short mod_level; + unsigned short datalen; + unsigned char hdrlen; + +} __attribute__((packed)); + +#endif /* _ASM_390_MONWRITER_H */ diff --git a/arch/s390/include/uapi/asm/perf_regs.h b/arch/s390/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000..d17dd9e5d --- /dev/null +++ b/arch/s390/include/uapi/asm/perf_regs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_S390_PERF_REGS_H +#define _ASM_S390_PERF_REGS_H + +enum perf_event_s390_regs { + PERF_REG_S390_R0, + PERF_REG_S390_R1, + PERF_REG_S390_R2, + PERF_REG_S390_R3, + PERF_REG_S390_R4, + PERF_REG_S390_R5, + PERF_REG_S390_R6, + PERF_REG_S390_R7, + PERF_REG_S390_R8, + PERF_REG_S390_R9, + PERF_REG_S390_R10, + PERF_REG_S390_R11, + PERF_REG_S390_R12, + PERF_REG_S390_R13, + PERF_REG_S390_R14, + PERF_REG_S390_R15, + PERF_REG_S390_FP0, + PERF_REG_S390_FP1, + PERF_REG_S390_FP2, + PERF_REG_S390_FP3, + PERF_REG_S390_FP4, + PERF_REG_S390_FP5, + PERF_REG_S390_FP6, + PERF_REG_S390_FP7, + PERF_REG_S390_FP8, + PERF_REG_S390_FP9, + PERF_REG_S390_FP10, + PERF_REG_S390_FP11, + PERF_REG_S390_FP12, + PERF_REG_S390_FP13, + PERF_REG_S390_FP14, + PERF_REG_S390_FP15, + PERF_REG_S390_MASK, + PERF_REG_S390_PC, + + PERF_REG_S390_MAX +}; + +#endif /* _ASM_S390_PERF_REGS_H */ diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h new file mode 100644 index 000000000..5ad76471e --- /dev/null +++ b/arch/s390/include/uapi/asm/pkey.h @@ -0,0 +1,452 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Userspace interface to the pkey device driver + * + * Copyright IBM Corp. 
2017, 2023 + * + * Author: Harald Freudenberger + * + */ + +#ifndef _UAPI_PKEY_H +#define _UAPI_PKEY_H + +#include +#include + +/* + * Ioctl calls supported by the pkey device driver + */ + +#define PKEY_IOCTL_MAGIC 'p' + +#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */ +#define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */ +#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */ +#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */ +#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have always 136 bytes */ +#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */ +#define MAXEP11AESKEYBLOBSIZE 336 /* max EP11 AES key blob size */ + +/* Minimum size of a key blob */ +#define MINKEYBLOBSIZE SECKEYBLOBSIZE + +/* defines for the type field within the pkey_protkey struct */ +#define PKEY_KEYTYPE_AES_128 1 +#define PKEY_KEYTYPE_AES_192 2 +#define PKEY_KEYTYPE_AES_256 3 +#define PKEY_KEYTYPE_ECC 4 +#define PKEY_KEYTYPE_ECC_P256 5 +#define PKEY_KEYTYPE_ECC_P384 6 +#define PKEY_KEYTYPE_ECC_P521 7 +#define PKEY_KEYTYPE_ECC_ED25519 8 +#define PKEY_KEYTYPE_ECC_ED448 9 + +/* the newer ioctls use a pkey_key_type enum for type information */ +enum pkey_key_type { + PKEY_TYPE_CCA_DATA = (__u32) 1, + PKEY_TYPE_CCA_CIPHER = (__u32) 2, + PKEY_TYPE_EP11 = (__u32) 3, + PKEY_TYPE_CCA_ECC = (__u32) 0x1f, + PKEY_TYPE_EP11_AES = (__u32) 6, + PKEY_TYPE_EP11_ECC = (__u32) 7, +}; + +/* the newer ioctls use a pkey_key_size enum for key size information */ +enum pkey_key_size { + PKEY_SIZE_AES_128 = (__u32) 128, + PKEY_SIZE_AES_192 = (__u32) 192, + PKEY_SIZE_AES_256 = (__u32) 256, + PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF, +}; + +/* some of the newer ioctls use these flags */ +#define PKEY_FLAGS_MATCH_CUR_MKVP 0x00000002 +#define PKEY_FLAGS_MATCH_ALT_MKVP 0x00000004 + +/* keygenflags defines for CCA AES cipher keys */ +#define PKEY_KEYGEN_XPRT_SYM 0x00008000 +#define PKEY_KEYGEN_XPRT_UASY 0x00004000 +#define PKEY_KEYGEN_XPRT_AASY 0x00002000 +#define PKEY_KEYGEN_XPRT_RAW 0x00001000 +#define PKEY_KEYGEN_XPRT_CPAC 0x00000800 +#define PKEY_KEYGEN_XPRT_DES 0x00000080 +#define PKEY_KEYGEN_XPRT_AES 0x00000040 +#define PKEY_KEYGEN_XPRT_RSA 0x00000008 + +/* Struct to hold apqn target info (card/domain pair) */ +struct pkey_apqn { + __u16 card; + __u16 domain; +}; + +/* Struct to hold a CCA AES secure key blob */ +struct pkey_seckey { + __u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */ +}; + +/* Struct to hold protected key and length info */ +struct pkey_protkey { + __u32 type; /* key type, one of the PKEY_KEYTYPE_AES values */ + __u32 len; /* bytes actually stored in protkey[] */ + __u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ +}; + +/* Struct to hold an AES clear key value */ +struct pkey_clrkey { + __u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */ +}; + +/* + * EP11 key blobs of type PKEY_TYPE_EP11_AES and PKEY_TYPE_EP11_ECC + * are ep11 blobs prepended by this header: + */ +struct ep11kblob_header { + __u8 type; /* always 0x00 */ + __u8 hver; /* header version, currently needs to be 0x00 */ + __u16 len; /* total length in bytes (including this header) */ + __u8 version; /* PKEY_TYPE_EP11_AES or PKEY_TYPE_EP11_ECC */ + __u8 res0; /* unused */ + __u16 bitlen; /* clear key bit len, 0 for unknown */ + __u8 res1[8]; /* unused */ +} __packed; + +/* + * Generate CCA AES secure key. 
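+ *
+ * For example, generating an AES-256 secure key on any card/domain
+ * (a sketch; the device path and the error handling are illustrative
+ * assumptions):
+ *
+ *   struct pkey_genseck genseck = {
+ *           .cardnr = 0xFFFF,
+ *           .domain = 0xFFFF,
+ *           .keytype = PKEY_KEYTYPE_AES_256,
+ *   };
+ *   int fd = open("/dev/pkey", O_RDWR);
+ *
+ *   if (fd >= 0 && ioctl(fd, PKEY_GENSECK, &genseck) == 0)
+ *           the secure key blob is now in genseck.seckey.seckey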
+/* + * Generate CCA AES secure key. + */ +struct pkey_genseck { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + __u32 keytype; /* in: key type to generate */ + struct pkey_seckey seckey; /* out: the secure key blob */ +}; +#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck) + +/* + * Construct CCA AES secure key from clear key value + */ +struct pkey_clr2seck { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + __u32 keytype; /* in: key type to generate */ + struct pkey_clrkey clrkey; /* in: the clear key value */ + struct pkey_seckey seckey; /* out: the secure key blob */ +}; +#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck) + +/* + * Fabricate AES protected key from a CCA AES secure key + */ +struct pkey_sec2protk { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + struct pkey_seckey seckey; /* in: the secure key blob */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk) + +/* + * Fabricate AES protected key from clear key value + */ +struct pkey_clr2protk { + __u32 keytype; /* in: key type to generate */ + struct pkey_clrkey clrkey; /* in: the clear key value */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk) + +/* + * Search for matching crypto card based on the Master Key + * Verification Pattern provided inside a CCA AES secure key. + */ +struct pkey_findcard { + struct pkey_seckey seckey; /* in: the secure key blob */ + __u16 cardnr; /* out: card number */ + __u16 domain; /* out: domain number */ +}; +#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard) + +/* + * Combined operation: findcard + sec2prot + */ +struct pkey_skey2pkey { + struct pkey_seckey seckey; /* in: the secure key blob */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) + +/* + * Verify that the given CCA AES secure key is usable with + * the pkey module. Check for correct key type and for at + * least one crypto card able to handle this key (master key + * or old master key verification pattern matches). + * Return some info about the key: keysize in bits, keytype (currently + * only AES), and a flag telling whether the key is wrapped with an old MKVP. + */ +struct pkey_verifykey { + struct pkey_seckey seckey; /* in: the secure key blob */ + __u16 cardnr; /* out: card number */ + __u16 domain; /* out: domain number */ + __u16 keysize; /* out: key size in bits */ + __u32 attributes; /* out: attribute bits */ +}; +#define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey) +#define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */ +#define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */ + +/* + * Generate AES random protected key. + */ +struct pkey_genprotk { + __u32 keytype; /* in: key type to generate */ + struct pkey_protkey protkey; /* out: the protected key */ +}; + +#define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk) +
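Since PKEY_GENPROTK needs nothing but a key type, it makes a compact first example of driving the pkey device. An illustrative sketch (not part of the patch; error handling trimmed; /dev/pkey is the misc device node registered by the pkey driver):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_genprotk genprotk = { .keytype = PKEY_KEYTYPE_AES_256 };
	int fd = open("/dev/pkey", O_RDWR);

	if (fd < 0 || ioctl(fd, PKEY_GENPROTK, &genprotk) != 0) {
		perror("PKEY_GENPROTK");
		return 1;
	}
	printf("protected key: type %u, %u bytes\n",
	       genprotk.protkey.type, genprotk.protkey.len);
	close(fd);
	return 0;
}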
+/* + * Verify an AES protected key. + */ +struct pkey_verifyprotk { + struct pkey_protkey protkey; /* in: the protected key to verify */ +}; + +#define PKEY_VERIFYPROTK _IOW(PKEY_IOCTL_MAGIC, 0x09, struct pkey_verifyprotk) + +/* + * Transform a key blob (of any type) into a protected key + */ +struct pkey_kblob2pkey { + __u8 __user *key; /* in: the key blob */ + __u32 keylen; /* in: the key blob length */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey) + +/* + * Generate secure key, version 2. + * Generate CCA AES secure key, CCA AES cipher key or EP11 AES secure key. + * There needs to be a list of apqns given with at least one entry in there. + * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain + * is not supported. The implementation walks through the list of apqns and + * tries to send the request to each apqn without any further checking (like + * card type or online state). If the apqn fails, simply the next one in the + * list is tried until success (return 0) or the end of the list is reached + * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to + * generate a list of apqns based on the key type to generate. + * The keygenflags argument is passed to the low level generation functions + * individually for the key type and has a key type specific meaning. When + * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_* + * flags to widen the export possibilities. By default a cipher key is + * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * The keygenflag argument for generating an EP11 AES key should either be 0 + * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and + * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags. + */ +struct pkey_genseck2 { + struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/ + __u32 apqn_entries; /* in: # of apqn target list entries */ + enum pkey_key_type type; /* in: key type to generate */ + enum pkey_key_size size; /* in: key size to generate */ + __u32 keygenflags; /* in: key generation flags */ + __u8 __user *key; /* in: pointer to key blob buffer */ + __u32 keylen; /* in: available key blob buffer size */ + /* out: actual key blob size */ +}; +#define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2) +
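A sketch of the version 2 generate call under the rules just described: one explicit APQN and a 256-bit CCA data key, whose blob always fits in SECKEYBLOBSIZE. The helper name and the hard-coded single APQN are illustrative only; real code would obtain candidate APQNs via PKEY_APQNS4KT first:

#include <sys/ioctl.h>
#include <asm/pkey.h>

static long gen_cca_data_key(int pkeyfd, __u16 card, __u16 domain,
			     __u8 *buf, __u32 buflen)
{
	struct pkey_apqn apqn = { .card = card, .domain = domain };
	struct pkey_genseck2 gs2 = {
		.apqns        = &apqn,
		.apqn_entries = 1,		/* exact APQN, no 0xFFFF wildcards */
		.type         = PKEY_TYPE_CCA_DATA,
		.size         = PKEY_SIZE_AES_256,
		.keygenflags  = 0,
		.key          = buf,
		.keylen       = buflen,		/* in: buffer size, out: blob size */
	};

	if (ioctl(pkeyfd, PKEY_GENSECK2, &gs2) != 0)
		return -1;			/* errno ENODEV if no apqn worked */
	return gs2.keylen;			/* actual key blob size */
}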
+/* + * Generate secure key from clear key value, version 2. + * Construct a CCA AES secure key, CCA AES cipher key or EP11 AES secure + * key from a given clear key value. + * There needs to be a list of apqns given with at least one entry in there. + * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain + * is not supported. The implementation walks through the list of apqns and + * tries to send the request to each apqn without any further checking (like + * card type or online state). If the apqn fails, simply the next one in the + * list is tried until success (return 0) or the end of the list is reached + * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to + * generate a list of apqns based on the key type to generate. + * The keygenflags argument is passed to the low level generation functions + * individually for the key type and has a key type specific meaning. When + * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_* + * flags to widen the export possibilities. By default a cipher key is + * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * The keygenflag argument for generating an EP11 AES key should either be 0 + * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and + * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags. + */ +struct pkey_clr2seck2 { + struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */ + __u32 apqn_entries; /* in: # of apqn target list entries */ + enum pkey_key_type type; /* in: key type to generate */ + enum pkey_key_size size; /* in: key size to generate */ + __u32 keygenflags; /* in: key generation flags */ + struct pkey_clrkey clrkey; /* in: the clear key value */ + __u8 __user *key; /* in: pointer to key blob buffer */ + __u32 keylen; /* in: available key blob buffer size */ + /* out: actual key blob size */ +}; +#define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2) + +/* + * Verify the given secure key, version 2. + * Check for correct key type. If cardnr and domain are given (are not + * 0xFFFF) also check if this apqn is able to handle this type of key. + * If cardnr and/or domain is 0xFFFF, on return these values are filled + * with one apqn able to handle this key. + * The function also checks for the master key verification patterns + * of the key matching the current or alternate mkvp of the apqn. + * For CCA AES secure keys and CCA AES cipher keys this means to check + * the key's mkvp against the current or old mkvp of the apqns. The flags + * field is updated with some additional info about the apqn mkvp + * match: If the current mkvp matches the key's mkvp then the + * PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches + * the key's mkvp then PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the + * alternate mkvp is the old master key verification pattern. + * CCA AES secure keys are also checked to have the CPACF export allowed + * bit enabled (XPRTCPAC) in the kmf1 field. + * EP11 keys are also supported and the wkvp of the key is checked against + * the current wkvp of the apqns. There is no alternate for this type of + * key and so on a match the flag PKEY_FLAGS_MATCH_CUR_MKVP is always set. + * EP11 keys are also checked to have XCP_BLOB_PROTKEY_EXTRACTABLE set. + * The ioctl returns 0 as long as the given or found apqn's current or + * alternate mkvp matches the key's mkvp. If the given + * apqn does not match or there is no such apqn found, -1 with errno + * ENODEV is returned. + */ +struct pkey_verifykey2 { + __u8 __user *key; /* in: pointer to key blob */ + __u32 keylen; /* in: key blob size */ + __u16 cardnr; /* in/out: card number */ + __u16 domain; /* in/out: domain number */ + enum pkey_key_type type; /* out: the key type */ + enum pkey_key_size size; /* out: the key size */ + __u32 flags; /* out: additional key info flags */ +}; +#define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2) +
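An illustrative userspace sketch of the verify call just described, letting the driver pick a matching APQN via the 0xFFFF wildcards (helper name hypothetical):

#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static int verify_key2(int pkeyfd, __u8 *blob, __u32 bloblen)
{
	struct pkey_verifykey2 vk2 = {
		.key    = blob,
		.keylen = bloblen,
		.cardnr = 0xFFFF,	/* filled in with a usable APQN on return */
		.domain = 0xFFFF,
	};

	if (ioctl(pkeyfd, PKEY_VERIFYKEY2, &vk2) != 0)
		return -1;		/* ENODEV: no apqn matches this key */
	printf("usable on %04x.%04x, %s mkvp match\n", vk2.cardnr, vk2.domain,
	       (vk2.flags & PKEY_FLAGS_MATCH_CUR_MKVP) ? "current" : "alternate");
	return 0;
}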
+/* + * Transform a key blob into a protected key, version 2. + * There needs to be a list of apqns given with at least one entry in there. + * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain + * is not supported. The implementation walks through the list of apqns and + * tries to send the request to each apqn without any further checking (like + * card type or online state). If the apqn fails, simply the next one in the + * list is tried until success (return 0) or the end of the list is reached + * (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to + * generate a list of apqns based on the key. + * Deriving ECC protected keys from ECC secure keys is not supported with + * this ioctl, use PKEY_KBLOB2PROTK3 for this purpose. + */ +struct pkey_kblob2pkey2 { + __u8 __user *key; /* in: pointer to key blob */ + __u32 keylen; /* in: key blob size */ + struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */ + __u32 apqn_entries; /* in: # of apqn target list entries */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2) + +/* + * Build a list of APQNs based on a given key blob. + * It is able to find out which type of secure key is given (CCA AES secure + * key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private + * key) and tries to find all matching crypto cards based on the MKVP and maybe + * other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys + * with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of + * APQNs is further filtered by the key's mkvp which needs to match either + * the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters + * only) of the apqns. The flags argument may be used to limit the matching + * apqns. If PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of + * each apqn is compared. Likewise with PKEY_FLAGS_MATCH_ALT_MKVP. If both + * are given, apqns are returned where either the current or the + * alternate mkvp matches. At least one of the matching flags needs to be given. + * The flags argument for EP11 keys has no further action and is currently + * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only + * the wkvp from the key to match against the apqn's wkvp. + * The list of matching apqns is stored into the space given by the apqns + * argument and the number of stored entries goes into apqn_entries. If the list + * is empty (apqn_entries is 0) the apqn_entries field is updated to the number + * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0 + * but the number of apqn targets does not fit into the list, the apqn_entries + * field is updated with the number of required entries but there are no apqn + * values stored in the list and the ioctl returns with ENOSPC. If no matching + * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0. + */ +struct pkey_apqns4key { + __u8 __user *key; /* in: pointer to key blob */ + __u32 keylen; /* in: key blob size */ + __u32 flags; /* in: match controlling flags */ + struct pkey_apqn __user *apqns; /* in/out: ptr to list of apqn targets*/ + __u32 apqn_entries; /* in: max # of apqn entries in the list */ + /* out: # apqns stored into the list */ +}; +#define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key) +
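An illustrative sketch of the two-step protocol the comment above describes: first call with an empty list to learn the count, then call again with a sized buffer (helper name hypothetical; caller frees the result):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static struct pkey_apqn *apqns_for_key(int pkeyfd, __u8 *blob, __u32 bloblen,
				       __u32 *count)
{
	struct pkey_apqns4key a4k = {
		.key    = blob,
		.keylen = bloblen,
		.flags  = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP,
	};
	struct pkey_apqn *list;

	/* pass 1: apqns == NULL, apqn_entries == 0 -> just count the matches */
	if (ioctl(pkeyfd, PKEY_APQNS4K, &a4k) != 0 || a4k.apqn_entries == 0)
		return NULL;

	list = calloc(a4k.apqn_entries, sizeof(*list));
	if (!list)
		return NULL;

	/* pass 2: hand in the sized buffer */
	a4k.apqns = list;
	if (ioctl(pkeyfd, PKEY_APQNS4K, &a4k) != 0) {
		free(list);
		return NULL;
	}
	*count = a4k.apqn_entries;
	return list;
}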
+/* + * Build a list of APQNs based on a given key type. + * Build a list of APQNs based on a given key type and maybe further + * restrict the list by given master key verification patterns. + * For different key types there may be different ways to match the + * master key verification patterns. For CCA keys (CCA data key and CCA + * cipher key) the first 8 bytes of cur_mkvp refer to the current AES mkvp value + * of the apqn and the first 8 bytes of the alt_mkvp refer to the old AES mkvp. + * For CCA ECC keys it is similar but the match is against the APKA current/old + * mkvp. The flags argument controls whether the apqn's current and/or alternate + * mkvp should match. If PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current + * mkvp of each apqn is compared. Likewise with PKEY_FLAGS_MATCH_ALT_MKVP. + * If both are given, apqns are returned where either the + * current or the alternate mkvp matches. If no match flag is given + * (flags is 0) the mkvp values are ignored for the match process. + * For EP11 keys there is only the current wkvp. So if the apqns should also + * match a given wkvp, then the PKEY_FLAGS_MATCH_CUR_MKVP flag should be + * set. The wkvp value is 32 bytes but only the leftmost 16 bytes are compared + * against the leftmost 16 bytes of the wkvp of the apqn. + * The list of matching apqns is stored into the space given by the apqns + * argument and the number of stored entries goes into apqn_entries. If the list + * is empty (apqn_entries is 0) the apqn_entries field is updated to the number + * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0 + * but the number of apqn targets does not fit into the list, the apqn_entries + * field is updated with the number of required entries but there are no apqn + * values stored in the list and the ioctl returns with ENOSPC. If no matching + * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0. + */ +struct pkey_apqns4keytype { + enum pkey_key_type type; /* in: key type */ + __u8 cur_mkvp[32]; /* in: current mkvp */ + __u8 alt_mkvp[32]; /* in: alternate mkvp */ + __u32 flags; /* in: match controlling flags */ + struct pkey_apqn __user *apqns; /* in/out: ptr to list of apqn targets*/ + __u32 apqn_entries; /* in: max # of apqn entries in the list */ + /* out: # apqns stored into the list */ +}; +#define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype) +
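For completeness, a small sketch of the by-type variant: counting the APQNs able to handle CCA data keys whose current master key matches a given mkvp. This is the first pass of the same two-step protocol as for PKEY_APQNS4K; the helper name is hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static int count_apqns_for_cca_data(int pkeyfd, const __u8 mkvp[8], __u32 *count)
{
	struct pkey_apqns4keytype a4kt = {
		.type  = PKEY_TYPE_CCA_DATA,
		.flags = PKEY_FLAGS_MATCH_CUR_MKVP,
	};

	/* for CCA keys only the first 8 bytes of cur_mkvp are compared */
	memcpy(a4kt.cur_mkvp, mkvp, 8);
	if (ioctl(pkeyfd, PKEY_APQNS4KT, &a4kt) != 0)
		return -1;
	*count = a4kt.apqn_entries;	/* number of matching APQNs found */
	return 0;
}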
+/* + * Transform a key blob into a protected key, version 3. + * The difference from version 2 of this ioctl is that the protected key + * buffer is now passed explicitly and is no longer embedded in a struct + * pkey_protkey. + * So this ioctl is also able to handle EP11 and CCA ECC secure keys and + * provide ECC protected keys. + * There needs to be a list of apqns given with at least one entry in there. + * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain + * is not supported. The implementation walks through the list of apqns and + * tries to send the request to each apqn without any further checking (like + * card type or online state). If the apqn fails, simply the next one in the + * list is tried until success (return 0) or the end of the list is reached + * (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to + * generate a list of apqns based on the key. + */ +struct pkey_kblob2pkey3 { + __u8 __user *key; /* in: pointer to key blob */ + __u32 keylen; /* in: key blob size */ + struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */ + __u32 apqn_entries; /* in: # of apqn target list entries */ + __u32 pkeytype; /* out: prot key type (enum pkey_key_type) */ + __u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */ + __u8 __user *pkey; /* in: pkey blob buffer space ptr */ +}; +#define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3) + +#endif /* _UAPI_PKEY_H */ diff --git a/arch/s390/include/uapi/asm/posix_types.h b/arch/s390/include/uapi/asm/posix_types.h new file mode 100644 index 000000000..1913613e7 --- /dev/null +++ b/arch/s390/include/uapi/asm/posix_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + */ + +#ifndef __ARCH_S390_POSIX_TYPES_H +#define __ARCH_S390_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +#define __kernel_size_t __kernel_size_t + +typedef unsigned short __kernel_old_dev_t; +#define __kernel_old_dev_t __kernel_old_dev_t + +#ifdef __KERNEL__ +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +#define __kernel_old_uid_t __kernel_old_uid_t +#endif + +#ifndef __s390x__ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef int __kernel_ptrdiff_t; + +#else /* __s390x__ */ +typedef unsigned int __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +typedef int __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +typedef long __kernel_ptrdiff_t; +typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ + +#endif /* __s390x__ */ + +#define __kernel_ino_t __kernel_ino_t +#define __kernel_mode_t __kernel_mode_t +#define __kernel_ipc_pid_t __kernel_ipc_pid_t +#define __kernel_uid_t __kernel_uid_t +#define __kernel_gid_t __kernel_gid_t + +#include <asm-generic/posix_types.h> + +#endif diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..bb0826024 --- /dev/null +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -0,0 +1,455 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + */ + +#ifndef _UAPI_S390_PTRACE_H +#define _UAPI_S390_PTRACE_H + +#include <linux/const.h> + +/* + * Offsets in the user_regs_struct.
They are used for the ptrace + * system call and in entry.S + */ +#ifndef __s390x__ + +#define PT_PSWMASK 0x00 +#define PT_PSWADDR 0x04 +#define PT_GPR0 0x08 +#define PT_GPR1 0x0C +#define PT_GPR2 0x10 +#define PT_GPR3 0x14 +#define PT_GPR4 0x18 +#define PT_GPR5 0x1C +#define PT_GPR6 0x20 +#define PT_GPR7 0x24 +#define PT_GPR8 0x28 +#define PT_GPR9 0x2C +#define PT_GPR10 0x30 +#define PT_GPR11 0x34 +#define PT_GPR12 0x38 +#define PT_GPR13 0x3C +#define PT_GPR14 0x40 +#define PT_GPR15 0x44 +#define PT_ACR0 0x48 +#define PT_ACR1 0x4C +#define PT_ACR2 0x50 +#define PT_ACR3 0x54 +#define PT_ACR4 0x58 +#define PT_ACR5 0x5C +#define PT_ACR6 0x60 +#define PT_ACR7 0x64 +#define PT_ACR8 0x68 +#define PT_ACR9 0x6C +#define PT_ACR10 0x70 +#define PT_ACR11 0x74 +#define PT_ACR12 0x78 +#define PT_ACR13 0x7C +#define PT_ACR14 0x80 +#define PT_ACR15 0x84 +#define PT_ORIGGPR2 0x88 +#define PT_FPC 0x90 +/* + * A nasty fact of life that the ptrace api + * only supports passing of longs. + */ +#define PT_FPR0_HI 0x98 +#define PT_FPR0_LO 0x9C +#define PT_FPR1_HI 0xA0 +#define PT_FPR1_LO 0xA4 +#define PT_FPR2_HI 0xA8 +#define PT_FPR2_LO 0xAC +#define PT_FPR3_HI 0xB0 +#define PT_FPR3_LO 0xB4 +#define PT_FPR4_HI 0xB8 +#define PT_FPR4_LO 0xBC +#define PT_FPR5_HI 0xC0 +#define PT_FPR5_LO 0xC4 +#define PT_FPR6_HI 0xC8 +#define PT_FPR6_LO 0xCC +#define PT_FPR7_HI 0xD0 +#define PT_FPR7_LO 0xD4 +#define PT_FPR8_HI 0xD8 +#define PT_FPR8_LO 0XDC +#define PT_FPR9_HI 0xE0 +#define PT_FPR9_LO 0xE4 +#define PT_FPR10_HI 0xE8 +#define PT_FPR10_LO 0xEC +#define PT_FPR11_HI 0xF0 +#define PT_FPR11_LO 0xF4 +#define PT_FPR12_HI 0xF8 +#define PT_FPR12_LO 0xFC +#define PT_FPR13_HI 0x100 +#define PT_FPR13_LO 0x104 +#define PT_FPR14_HI 0x108 +#define PT_FPR14_LO 0x10C +#define PT_FPR15_HI 0x110 +#define PT_FPR15_LO 0x114 +#define PT_CR_9 0x118 +#define PT_CR_10 0x11C +#define PT_CR_11 0x120 +#define PT_IEEE_IP 0x13C +#define PT_LASTOFF PT_IEEE_IP +#define PT_ENDREGS 0x140-1 + +#define GPR_SIZE 4 +#define CR_SIZE 4 + +#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */ + +#else /* __s390x__ */ + +#define PT_PSWMASK 0x00 +#define PT_PSWADDR 0x08 +#define PT_GPR0 0x10 +#define PT_GPR1 0x18 +#define PT_GPR2 0x20 +#define PT_GPR3 0x28 +#define PT_GPR4 0x30 +#define PT_GPR5 0x38 +#define PT_GPR6 0x40 +#define PT_GPR7 0x48 +#define PT_GPR8 0x50 +#define PT_GPR9 0x58 +#define PT_GPR10 0x60 +#define PT_GPR11 0x68 +#define PT_GPR12 0x70 +#define PT_GPR13 0x78 +#define PT_GPR14 0x80 +#define PT_GPR15 0x88 +#define PT_ACR0 0x90 +#define PT_ACR1 0x94 +#define PT_ACR2 0x98 +#define PT_ACR3 0x9C +#define PT_ACR4 0xA0 +#define PT_ACR5 0xA4 +#define PT_ACR6 0xA8 +#define PT_ACR7 0xAC +#define PT_ACR8 0xB0 +#define PT_ACR9 0xB4 +#define PT_ACR10 0xB8 +#define PT_ACR11 0xBC +#define PT_ACR12 0xC0 +#define PT_ACR13 0xC4 +#define PT_ACR14 0xC8 +#define PT_ACR15 0xCC +#define PT_ORIGGPR2 0xD0 +#define PT_FPC 0xD8 +#define PT_FPR0 0xE0 +#define PT_FPR1 0xE8 +#define PT_FPR2 0xF0 +#define PT_FPR3 0xF8 +#define PT_FPR4 0x100 +#define PT_FPR5 0x108 +#define PT_FPR6 0x110 +#define PT_FPR7 0x118 +#define PT_FPR8 0x120 +#define PT_FPR9 0x128 +#define PT_FPR10 0x130 +#define PT_FPR11 0x138 +#define PT_FPR12 0x140 +#define PT_FPR13 0x148 +#define PT_FPR14 0x150 +#define PT_FPR15 0x158 +#define PT_CR_9 0x160 +#define PT_CR_10 0x168 +#define PT_CR_11 0x170 +#define PT_IEEE_IP 0x1A8 +#define PT_LASTOFF PT_IEEE_IP +#define PT_ENDREGS 0x1B0-1 + +#define GPR_SIZE 8 +#define CR_SIZE 8 + +#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */ + 
+#endif /* __s390x__ */ + +#ifndef __s390x__ + +#define PSW_MASK_PER _AC(0x40000000, UL) +#define PSW_MASK_DAT _AC(0x04000000, UL) +#define PSW_MASK_IO _AC(0x02000000, UL) +#define PSW_MASK_EXT _AC(0x01000000, UL) +#define PSW_MASK_KEY _AC(0x00F00000, UL) +#define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */ +#define PSW_MASK_MCHECK _AC(0x00040000, UL) +#define PSW_MASK_WAIT _AC(0x00020000, UL) +#define PSW_MASK_PSTATE _AC(0x00010000, UL) +#define PSW_MASK_ASC _AC(0x0000C000, UL) +#define PSW_MASK_CC _AC(0x00003000, UL) +#define PSW_MASK_PM _AC(0x00000F00, UL) +#define PSW_MASK_RI _AC(0x00000000, UL) +#define PSW_MASK_EA _AC(0x00000000, UL) +#define PSW_MASK_BA _AC(0x00000000, UL) + +#define PSW_MASK_USER _AC(0x0000FF00, UL) + +#define PSW_ADDR_AMODE _AC(0x80000000, UL) +#define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL) + +#define PSW_ASC_PRIMARY _AC(0x00000000, UL) +#define PSW_ASC_ACCREG _AC(0x00004000, UL) +#define PSW_ASC_SECONDARY _AC(0x00008000, UL) +#define PSW_ASC_HOME _AC(0x0000C000, UL) + +#else /* __s390x__ */ + +#define PSW_MASK_PER _AC(0x4000000000000000, UL) +#define PSW_MASK_DAT _AC(0x0400000000000000, UL) +#define PSW_MASK_IO _AC(0x0200000000000000, UL) +#define PSW_MASK_EXT _AC(0x0100000000000000, UL) +#define PSW_MASK_BASE _AC(0x0000000000000000, UL) +#define PSW_MASK_KEY _AC(0x00F0000000000000, UL) +#define PSW_MASK_MCHECK _AC(0x0004000000000000, UL) +#define PSW_MASK_WAIT _AC(0x0002000000000000, UL) +#define PSW_MASK_PSTATE _AC(0x0001000000000000, UL) +#define PSW_MASK_ASC _AC(0x0000C00000000000, UL) +#define PSW_MASK_CC _AC(0x0000300000000000, UL) +#define PSW_MASK_PM _AC(0x00000F0000000000, UL) +#define PSW_MASK_RI _AC(0x0000008000000000, UL) +#define PSW_MASK_EA _AC(0x0000000100000000, UL) +#define PSW_MASK_BA _AC(0x0000000080000000, UL) + +#define PSW_MASK_USER _AC(0x0000FF0180000000, UL) + +#define PSW_ADDR_AMODE _AC(0x0000000000000000, UL) +#define PSW_ADDR_INSN _AC(0xFFFFFFFFFFFFFFFF, UL) + +#define PSW_ASC_PRIMARY _AC(0x0000000000000000, UL) +#define PSW_ASC_ACCREG _AC(0x0000400000000000, UL) +#define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL) +#define PSW_ASC_HOME _AC(0x0000C00000000000, UL) + +#endif /* __s390x__ */ + +#define NUM_GPRS 16 +#define NUM_FPRS 16 +#define NUM_CRS 16 +#define NUM_ACRS 16 + +#define NUM_CR_WORDS 3 + +#define FPR_SIZE 8 +#define FPC_SIZE 4 +#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */ +#define ACR_SIZE 4 + + +#define PTRACE_OLDSETOPTIONS 21 +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 +#ifndef __ASSEMBLY__ +#include <linux/stddef.h> +#include <linux/types.h> + +typedef union { + float f; + double d; + __u64 ui; + struct + { + __u32 hi; + __u32 lo; + } fp; +} freg_t; + +typedef struct { + __u32 fpc; + __u32 pad; + freg_t fprs[NUM_FPRS]; +} s390_fp_regs; + +#define FPC_EXCEPTION_MASK 0xF8000000 +#define FPC_FLAGS_MASK 0x00F80000 +#define FPC_DXC_MASK 0x0000FF00 +#define FPC_RM_MASK 0x00000003 + +/* this typedef defines what a Program Status Word looks like */ +typedef struct { + unsigned long mask; + unsigned long addr; +} __attribute__ ((aligned(8))) psw_t; + +/* + * The s390_regs structure is used to define the elf_gregset_t. + */ +typedef struct { + psw_t psw; + unsigned long gprs[NUM_GPRS]; + unsigned int acrs[NUM_ACRS]; + unsigned long orig_gpr2; +} s390_regs; +
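As an illustration (not part of the patch), the PSW mask definitions above can be used to pull individual fields out of a psw_t; the shift values mirror the two mask layouts:

/* hypothetical helpers, assuming only the definitions in this header */
static inline unsigned int psw_condition_code(const psw_t *psw)
{
#ifdef __s390x__
	return (unsigned int)((psw->mask & PSW_MASK_CC) >> 44);
#else
	return (unsigned int)((psw->mask & PSW_MASK_CC) >> 12);
#endif
}

static inline int psw_in_problem_state(const psw_t *psw)
{
	return (psw->mask & PSW_MASK_PSTATE) != 0;
}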
+/* + * The user_pt_regs structure exports the beginning of + * the in-kernel pt_regs structure to user space. + */ +typedef struct { + unsigned long args[1]; + psw_t psw; + unsigned long gprs[NUM_GPRS]; +} user_pt_regs; + +/* + * Now for the user space program event recording (trace) definitions. + * The following structures are used only for the ptrace interface, don't + * touch or even look at it if you don't want to modify the user-space + * ptrace interface. In particular stay away from it for in-kernel PER. + */ +typedef struct { + unsigned long cr[NUM_CR_WORDS]; +} per_cr_words; + +#define PER_EM_MASK 0xE8000000UL + +typedef struct { +#ifdef __s390x__ + unsigned : 32; +#endif /* __s390x__ */ + unsigned em_branching : 1; + unsigned em_instruction_fetch : 1; + /* + * Switching on storage alteration automatically fixes + * the storage alteration event bit in the users std. + */ + unsigned em_storage_alteration : 1; + unsigned em_gpr_alt_unused : 1; + unsigned em_store_real_address : 1; + unsigned : 3; + unsigned branch_addr_ctl : 1; + unsigned : 1; + unsigned storage_alt_space_ctl : 1; + unsigned : 21; + unsigned long starting_addr; + unsigned long ending_addr; +} per_cr_bits; + +typedef struct { + unsigned short perc_atmid; + unsigned long address; + unsigned char access_id; +} per_lowcore_words; + +typedef struct { + unsigned perc_branching : 1; + unsigned perc_instruction_fetch : 1; + unsigned perc_storage_alteration : 1; + unsigned perc_gpr_alt_unused : 1; + unsigned perc_store_real_address : 1; + unsigned : 3; + unsigned atmid_psw_bit_31 : 1; + unsigned atmid_validity_bit : 1; + unsigned atmid_psw_bit_32 : 1; + unsigned atmid_psw_bit_5 : 1; + unsigned atmid_psw_bit_16 : 1; + unsigned atmid_psw_bit_17 : 1; + unsigned si : 2; + unsigned long address; + unsigned : 4; + unsigned access_id : 4; +} per_lowcore_bits; + +typedef struct { + union { + per_cr_words words; + per_cr_bits bits; + } control_regs; + /* + * The single_step and instruction_fetch bits are obsolete, + * the kernel always sets them to zero. To enable single + * stepping use ptrace(PTRACE_SINGLESTEP) instead. + */ + unsigned single_step : 1; + unsigned instruction_fetch : 1; + unsigned : 30; + /* + * These addresses are copied into cr10 & cr11 if single + * stepping is switched off + */ + unsigned long starting_addr; + unsigned long ending_addr; + union { + per_lowcore_words words; + per_lowcore_bits bits; + } lowcore; +} per_struct; + +typedef struct { + unsigned int len; + unsigned long kernel_addr; + unsigned long process_addr; +} ptrace_area; + +/* + * S/390 specific non-POSIX ptrace requests. I chose unusual values so + * they are unlikely to clash with future ptrace definitions. + */ +#define PTRACE_PEEKUSR_AREA 0x5000 +#define PTRACE_POKEUSR_AREA 0x5001 +#define PTRACE_PEEKTEXT_AREA 0x5002 +#define PTRACE_PEEKDATA_AREA 0x5003 +#define PTRACE_POKETEXT_AREA 0x5004 +#define PTRACE_POKEDATA_AREA 0x5005 +#define PTRACE_GET_LAST_BREAK 0x5006 +#define PTRACE_PEEK_SYSTEM_CALL 0x5007 +#define PTRACE_POKE_SYSTEM_CALL 0x5008 +#define PTRACE_ENABLE_TE 0x5009 +#define PTRACE_DISABLE_TE 0x5010 +#define PTRACE_TE_ABORT_RAND 0x5011 +
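A hedged sketch of the area requests just defined: reading a stopped tracee's general purpose registers with PTRACE_PEEKUSR_AREA, where kernel_addr is the offset into the user area and process_addr the destination buffer in the tracer (helper name hypothetical; depending on the libc, the request value may need a cast to its ptrace request type):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static long peek_gprs(pid_t pid, unsigned long gprs[NUM_GPRS])
{
	ptrace_area area = {
		.len          = NUM_GPRS * GPR_SIZE,
		.kernel_addr  = PT_GPR0,             /* offset in the user area */
		.process_addr = (unsigned long)gprs, /* buffer in the tracer */
	};

	return ptrace(PTRACE_PEEKUSR_AREA, pid, &area, NULL);
}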
+/* + * The numbers chosen here are somewhat arbitrary but absolutely MUST + * not overlap with any of the numbers assigned in <linux/ptrace.h>. + */ +#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ + +/* + * PT_PROT definition is loosely based on hppa bsd definition in + * gdb/hppab-nat.c + */ +#define PTRACE_PROT 21 + +typedef enum { + ptprot_set_access_watchpoint, + ptprot_set_write_watchpoint, + ptprot_disable_watchpoint +} ptprot_flags; + +typedef struct { + unsigned long lowaddr; + unsigned long hiaddr; + ptprot_flags prot; +} ptprot_area; + +/* Sequence of bytes for breakpoint illegal instruction. */ +#define S390_BREAKPOINT {0x0,0x1} +#define S390_BREAKPOINT_U16 ((__u16)0x0001) +#define S390_SYSCALL_OPCODE ((__u16)0x0a00) +#define S390_SYSCALL_SIZE 2 + +/* + * The user_regs_struct defines the way the user registers are + * stored on the stack for signal handling. + */ +struct user_regs_struct { + psw_t psw; + unsigned long gprs[NUM_GPRS]; + unsigned int acrs[NUM_ACRS]; + unsigned long orig_gpr2; + s390_fp_regs fp_regs; + /* + * These per registers are in here so that gdb can modify them + * itself as there is no "official" ptrace interface for hardware + * watchpoints. This is the way Intel does it. + */ + per_struct per_info; + unsigned long ieee_instruction_pointer; /* obsolete, always 0 */ +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_S390_PTRACE_H */ diff --git a/arch/s390/include/uapi/asm/qeth.h b/arch/s390/include/uapi/asm/qeth.h new file mode 100644 index 000000000..fac9995df --- /dev/null +++ b/arch/s390/include/uapi/asm/qeth.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * ioctl definitions for qeth driver + * + * Copyright IBM Corp. 2004 + * + * Author(s): Thomas Spatzier + * + */ +#ifndef __ASM_S390_QETH_IOCTL_H__ +#define __ASM_S390_QETH_IOCTL_H__ +#include <linux/types.h> +#include <linux/ioctl.h> + +#define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE) +#define SIOC_QETH_ARP_QUERY_INFO (SIOCDEVPRIVATE + 1) +#define SIOC_QETH_ARP_ADD_ENTRY (SIOCDEVPRIVATE + 2) +#define SIOC_QETH_ARP_REMOVE_ENTRY (SIOCDEVPRIVATE + 3) +#define SIOC_QETH_ARP_FLUSH_CACHE (SIOCDEVPRIVATE + 4) +#define SIOC_QETH_ADP_SET_SNMP_CONTROL (SIOCDEVPRIVATE + 5) +#define SIOC_QETH_GET_CARD_TYPE (SIOCDEVPRIVATE + 6) +#define SIOC_QETH_QUERY_OAT (SIOCDEVPRIVATE + 7) + +struct qeth_arp_cache_entry { + __u8 macaddr[6]; + __u8 reserved1[2]; + __u8 ipaddr[16]; /* for both IPv4 and IPv6 */ + __u8 reserved2[32]; +} __attribute__ ((packed)); + +enum qeth_arp_ipaddrtype { + QETHARP_IP_ADDR_V4 = 1, + QETHARP_IP_ADDR_V6 = 2, +}; +struct qeth_arp_entrytype { + __u8 mac; + __u8 ip; +} __attribute__((packed)); + +#define QETH_QARP_MEDIASPECIFIC_BYTES 32 +#define QETH_QARP_MACADDRTYPE_BYTES 1 +struct qeth_arp_qi_entry7 { + __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES]; + struct qeth_arp_entrytype type; + __u8 macaddr[6]; + __u8 ipaddr[4]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry7_ipv6 { + __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES]; + struct qeth_arp_entrytype type; + __u8 macaddr[6]; + __u8 ipaddr[16]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry7_short { + struct qeth_arp_entrytype type; + __u8 macaddr[6]; + __u8 ipaddr[4]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry7_short_ipv6 { + struct qeth_arp_entrytype type; + __u8 macaddr[6]; + __u8 ipaddr[16]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry5 { + __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES]; + struct qeth_arp_entrytype type; + __u8 ipaddr[4]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry5_ipv6 { + __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES]; + struct qeth_arp_entrytype type; + __u8
ipaddr[16]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry5_short { + struct qeth_arp_entrytype type; + __u8 ipaddr[4]; +} __attribute__((packed)); + +struct qeth_arp_qi_entry5_short_ipv6 { + struct qeth_arp_entrytype type; + __u8 ipaddr[16]; +} __attribute__((packed)); +/* + * can be set by user if no "media specific information" is wanted + * -> saves a lot of space in user space buffer + */ +#define QETH_QARP_STRIP_ENTRIES 0x8000 +#define QETH_QARP_WITH_IPV6 0x4000 +#define QETH_QARP_REQUEST_MASK 0x00ff + +/* data sent to user space as result of query arp ioctl */ +#define QETH_QARP_USER_DATA_SIZE 20000 +#define QETH_QARP_MASK_OFFSET 4 +#define QETH_QARP_ENTRIES_OFFSET 6 +struct qeth_arp_query_user_data { + union { + __u32 data_len; /* set by user space program */ + __u32 no_entries; /* set by kernel */ + } u; + __u16 mask_bits; + char *entries; +} __attribute__((packed)); + +struct qeth_query_oat_data { + __u32 command; + __u32 buffer_len; + __u32 response_len; + __u64 ptr; +}; +#endif /* __ASM_S390_QETH_IOCTL_H__ */ diff --git a/arch/s390/include/uapi/asm/raw3270.h b/arch/s390/include/uapi/asm/raw3270.h new file mode 100644 index 000000000..6676f102b --- /dev/null +++ b/arch/s390/include/uapi/asm/raw3270.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_S390_UAPI_RAW3270_H +#define __ASM_S390_UAPI_RAW3270_H + +/* Local Channel Commands */ +#define TC_WRITE 0x01 /* Write */ +#define TC_RDBUF 0x02 /* Read Buffer */ +#define TC_EWRITE 0x05 /* Erase write */ +#define TC_READMOD 0x06 /* Read modified */ +#define TC_EWRITEA 0x0d /* Erase write alternate */ +#define TC_WRITESF 0x11 /* Write structured field */ + +/* Buffer Control Orders */ +#define TO_GE 0x08 /* Graphics Escape */ +#define TO_SF 0x1d /* Start field */ +#define TO_SBA 0x11 /* Set buffer address */ +#define TO_IC 0x13 /* Insert cursor */ +#define TO_PT 0x05 /* Program tab */ +#define TO_RA 0x3c /* Repeat to address */ +#define TO_SFE 0x29 /* Start field extended */ +#define TO_EUA 0x12 /* Erase unprotected to address */ +#define TO_MF 0x2c /* Modify field */ +#define TO_SA 0x28 /* Set attribute */ + +/* Field Attribute Bytes */ +#define TF_INPUT 0x40 /* Visible input */ +#define TF_INPUTN 0x4c /* Invisible input */ +#define TF_INMDT 0xc1 /* Visible, Set-MDT */ +#define TF_LOG 0x60 + +/* Character Attribute Bytes */ +#define TAT_RESET 0x00 +#define TAT_FIELD 0xc0 +#define TAT_EXTHI 0x41 +#define TAT_FGCOLOR 0x42 +#define TAT_CHARS 0x43 +#define TAT_BGCOLOR 0x45 +#define TAT_TRANS 0x46 + +/* Extended-Highlighting Bytes */ +#define TAX_RESET 0x00 +#define TAX_BLINK 0xf1 +#define TAX_REVER 0xf2 +#define TAX_UNDER 0xf4 + +/* Reset value */ +#define TAR_RESET 0x00 + +/* Color values */ +#define TAC_RESET 0x00 +#define TAC_BLUE 0xf1 +#define TAC_RED 0xf2 +#define TAC_PINK 0xf3 +#define TAC_GREEN 0xf4 +#define TAC_TURQ 0xf5 +#define TAC_YELLOW 0xf6 +#define TAC_WHITE 0xf7 +#define TAC_DEFAULT 0x00 + +/* Write Control Characters */ +#define TW_NONE 0x40 /* No particular action */ +#define TW_KR 0xc2 /* Keyboard restore */ +#define TW_PLUSALARM 0x04 /* Add this bit for alarm */ + +#define RAW3270_FIRSTMINOR 1 /* First minor number */ +#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */ + +#define AID_CLEAR 0x6d +#define AID_ENTER 0x7d +#define AID_PF3 0xf3 +#define AID_PF7 0xf7 +#define AID_PF8 0xf8 +#define AID_READ_PARTITION 0x88 + +#endif /* __ASM_S390_UAPI_RAW3270_H */ diff --git a/arch/s390/include/uapi/asm/runtime_instr.h 
b/arch/s390/include/uapi/asm/runtime_instr.h new file mode 100644 index 000000000..455da46e3 --- /dev/null +++ b/arch/s390/include/uapi/asm/runtime_instr.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _S390_UAPI_RUNTIME_INSTR_H +#define _S390_UAPI_RUNTIME_INSTR_H + +#include <linux/types.h> + +#define S390_RUNTIME_INSTR_START 0x1 +#define S390_RUNTIME_INSTR_STOP 0x2 + +struct runtime_instr_cb { + __u64 rca; + __u64 roa; + __u64 rla; + + __u32 v : 1; + __u32 s : 1; + __u32 k : 1; + __u32 h : 1; + __u32 a : 1; + __u32 reserved1 : 3; + __u32 ps : 1; + __u32 qs : 1; + __u32 pc : 1; + __u32 qc : 1; + __u32 reserved2 : 1; + __u32 g : 1; + __u32 u : 1; + __u32 l : 1; + __u32 key : 4; + __u32 reserved3 : 8; + __u32 t : 1; + __u32 rgs : 3; + + __u32 m : 4; + __u32 n : 1; + __u32 mae : 1; + __u32 reserved4 : 2; + __u32 c : 1; + __u32 r : 1; + __u32 b : 1; + __u32 j : 1; + __u32 e : 1; + __u32 x : 1; + __u32 reserved5 : 2; + __u32 bpxn : 1; + __u32 bpxt : 1; + __u32 bpti : 1; + __u32 bpni : 1; + __u32 reserved6 : 2; + + __u32 d : 1; + __u32 f : 1; + __u32 ic : 4; + __u32 dc : 4; + + __u64 reserved7; + __u64 sf; + __u64 rsic; + __u64 reserved8; +} __attribute__((__packed__, __aligned__(8))); + +static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb) +{ + asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */ + : : "Q" (*cb)); +} + +static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb) +{ + asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */ + : "=Q" (*cb) : : "cc"); +} + +#endif /* _S390_UAPI_RUNTIME_INSTR_H */ diff --git a/arch/s390/include/uapi/asm/schid.h b/arch/s390/include/uapi/asm/schid.h new file mode 100644 index 000000000..a3e1cf168 --- /dev/null +++ b/arch/s390/include/uapi/asm/schid.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPIASM_SCHID_H +#define _UAPIASM_SCHID_H + +#include <linux/types.h> + +#ifndef __ASSEMBLY__ + +struct subchannel_id { + __u32 cssid : 8; + __u32 : 4; + __u32 m : 1; + __u32 ssid : 2; + __u32 one : 1; + __u32 sch_no : 16; +} __attribute__ ((packed, aligned(4))); + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPIASM_SCHID_H */
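As an illustration (not part of the patch), a subchannel_id is conventionally printed in "cssid.ssid.sch_no" notation, which maps directly onto the bitfields above; the helper name is hypothetical:

#include <stdio.h>
#include <asm/schid.h>

/* format a subchannel id, e.g. "0.0.0815" */
static void print_schid(const struct subchannel_id *schid)
{
	printf("%x.%x.%04x\n", schid->cssid, schid->ssid, schid->sch_no);
}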
diff --git a/arch/s390/include/uapi/asm/sclp_ctl.h b/arch/s390/include/uapi/asm/sclp_ctl.h new file mode 100644 index 000000000..e4e8c4dcd --- /dev/null +++ b/arch/s390/include/uapi/asm/sclp_ctl.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * IOCTL interface for SCLP + * + * Copyright IBM Corp. 2012 + * + * Author: Michael Holzheu + */ + +#ifndef _ASM_SCLP_CTL_H +#define _ASM_SCLP_CTL_H + +#include <linux/types.h> + +struct sclp_ctl_sccb { + __u32 cmdw; + __u64 sccb; +} __attribute__((packed)); + +#define SCLP_CTL_IOCTL_MAGIC 0x10 + +#define SCLP_CTL_SCCB \ + _IOWR(SCLP_CTL_IOCTL_MAGIC, 0x10, struct sclp_ctl_sccb) + +#endif diff --git a/arch/s390/include/uapi/asm/setup.h b/arch/s390/include/uapi/asm/setup.h new file mode 100644 index 000000000..598d769e7 --- /dev/null +++ b/arch/s390/include/uapi/asm/setup.h @@ -0,0 +1 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h new file mode 100644 index 000000000..ede318653 --- /dev/null +++ b/arch/s390/include/uapi/asm/sie.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_S390_SIE_H +#define _UAPI_ASM_S390_SIE_H + +#define diagnose_codes \ + { 0x10, "DIAG (0x10) release pages" }, \ + { 0x44, "DIAG (0x44) time slice end" }, \ + { 0x9c, "DIAG (0x9c) time slice end directed" }, \ + { 0x204, "DIAG (0x204) logical-cpu utilization" }, \ + { 0x258, "DIAG (0x258) page-reference services" }, \ + { 0x288, "DIAG (0x288) watchdog functions" }, \ + { 0x308, "DIAG (0x308) ipl functions" }, \ + { 0x500, "DIAG (0x500) KVM virtio functions" }, \ + { 0x501, "DIAG (0x501) KVM breakpoint" } + +#define sigp_order_codes \ + { 0x01, "SIGP sense" }, \ + { 0x02, "SIGP external call" }, \ + { 0x03, "SIGP emergency signal" }, \ + { 0x04, "SIGP start" }, \ + { 0x05, "SIGP stop" }, \ + { 0x06, "SIGP restart" }, \ + { 0x09, "SIGP stop and store status" }, \ + { 0x0b, "SIGP initial cpu reset" }, \ + { 0x0c, "SIGP cpu reset" }, \ + { 0x0d, "SIGP set prefix" }, \ + { 0x0e, "SIGP store status at address" }, \ + { 0x12, "SIGP set architecture" }, \ + { 0x13, "SIGP conditional emergency signal" }, \ + { 0x15, "SIGP sense running" }, \ + { 0x16, "SIGP set multithreading"}, \ + { 0x17, "SIGP store additional status at address"} + +#define icpt_prog_codes \ + { 0x0001, "Prog Operation" }, \ + { 0x0002, "Prog Privileged Operation" }, \ + { 0x0003, "Prog Execute" }, \ + { 0x0004, "Prog Protection" }, \ + { 0x0005, "Prog Addressing" }, \ + { 0x0006, "Prog Specification" }, \ + { 0x0007, "Prog Data" }, \ + { 0x0008, "Prog Fixedpoint overflow" }, \ + { 0x0009, "Prog Fixedpoint divide" }, \ + { 0x000A, "Prog Decimal overflow" }, \ + { 0x000B, "Prog Decimal divide" }, \ + { 0x000C, "Prog HFP exponent overflow" }, \ + { 0x000D, "Prog HFP exponent underflow" }, \ + { 0x000E, "Prog HFP significance" }, \ + { 0x000F, "Prog HFP divide" }, \ + { 0x0010, "Prog Segment translation" }, \ + { 0x0011, "Prog Page translation" }, \ + { 0x0012, "Prog Translation specification" }, \ + { 0x0013, "Prog Special operation" }, \ + { 0x0015, "Prog Operand" }, \ + { 0x0016, "Prog Trace table" }, \ + { 0x0017, "Prog ASNtranslation specification" }, \ + { 0x001C, "Prog Spaceswitch event" }, \ + { 0x001D, "Prog HFP square root" }, \ + { 0x001F, "Prog PCtranslation specification" }, \ + { 0x0020, "Prog AFX translation" }, \ + { 0x0021, "Prog ASX translation" }, \ + { 0x0022, "Prog LX translation" }, \ + { 0x0023, "Prog EX translation" }, \ + { 0x0024, "Prog Primary authority" }, \ + { 0x0025, "Prog Secondary authority" }, \ + { 0x0026, "Prog LFXtranslation exception" }, \ + { 0x0027, "Prog LSXtranslation exception" }, \ + { 0x0028, "Prog ALET specification" }, \ + { 0x0029, "Prog ALEN translation" }, \ + { 0x002A, "Prog ALE sequence" }, \ + { 0x002B, "Prog ASTE validity" }, \
+ { 0x002C, "Prog ASTE sequence" }, \ + { 0x002D, "Prog Extended authority" }, \ + { 0x002E, "Prog LSTE sequence" }, \ + { 0x002F, "Prog ASTE instance" }, \ + { 0x0030, "Prog Stack full" }, \ + { 0x0031, "Prog Stack empty" }, \ + { 0x0032, "Prog Stack specification" }, \ + { 0x0033, "Prog Stack type" }, \ + { 0x0034, "Prog Stack operation" }, \ + { 0x0039, "Prog Region first translation" }, \ + { 0x003A, "Prog Region second translation" }, \ + { 0x003B, "Prog Region third translation" }, \ + { 0x0040, "Prog Monitor event" }, \ + { 0x0080, "Prog PER event" }, \ + { 0x0119, "Prog Crypto operation" } + +#define exit_code_ipa0(ipa0, opcode, mnemonic) \ + { (ipa0 << 8 | opcode), #ipa0 " " mnemonic } +#define exit_code(opcode, mnemonic) \ + { opcode, mnemonic } + +#define icpt_insn_codes \ + exit_code_ipa0(0x01, 0x01, "PR"), \ + exit_code_ipa0(0x01, 0x04, "PTFF"), \ + exit_code_ipa0(0x01, 0x07, "SCKPF"), \ + exit_code_ipa0(0xAA, 0x00, "RINEXT"), \ + exit_code_ipa0(0xAA, 0x01, "RION"), \ + exit_code_ipa0(0xAA, 0x02, "TRIC"), \ + exit_code_ipa0(0xAA, 0x03, "RIOFF"), \ + exit_code_ipa0(0xAA, 0x04, "RIEMIT"), \ + exit_code_ipa0(0xB2, 0x02, "STIDP"), \ + exit_code_ipa0(0xB2, 0x04, "SCK"), \ + exit_code_ipa0(0xB2, 0x05, "STCK"), \ + exit_code_ipa0(0xB2, 0x06, "SCKC"), \ + exit_code_ipa0(0xB2, 0x07, "STCKC"), \ + exit_code_ipa0(0xB2, 0x08, "SPT"), \ + exit_code_ipa0(0xB2, 0x09, "STPT"), \ + exit_code_ipa0(0xB2, 0x0d, "PTLB"), \ + exit_code_ipa0(0xB2, 0x10, "SPX"), \ + exit_code_ipa0(0xB2, 0x11, "STPX"), \ + exit_code_ipa0(0xB2, 0x12, "STAP"), \ + exit_code_ipa0(0xB2, 0x14, "SIE"), \ + exit_code_ipa0(0xB2, 0x16, "SETR"), \ + exit_code_ipa0(0xB2, 0x17, "STETR"), \ + exit_code_ipa0(0xB2, 0x18, "PC"), \ + exit_code_ipa0(0xB2, 0x20, "SERVC"), \ + exit_code_ipa0(0xB2, 0x21, "IPTE"), \ + exit_code_ipa0(0xB2, 0x28, "PT"), \ + exit_code_ipa0(0xB2, 0x29, "ISKE"), \ + exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ + exit_code_ipa0(0xB2, 0x2b, "SSKE"), \ + exit_code_ipa0(0xB2, 0x2c, "TB"), \ + exit_code_ipa0(0xB2, 0x2e, "PGIN"), \ + exit_code_ipa0(0xB2, 0x2f, "PGOUT"), \ + exit_code_ipa0(0xB2, 0x30, "CSCH"), \ + exit_code_ipa0(0xB2, 0x31, "HSCH"), \ + exit_code_ipa0(0xB2, 0x32, "MSCH"), \ + exit_code_ipa0(0xB2, 0x33, "SSCH"), \ + exit_code_ipa0(0xB2, 0x34, "STSCH"), \ + exit_code_ipa0(0xB2, 0x35, "TSCH"), \ + exit_code_ipa0(0xB2, 0x36, "TPI"), \ + exit_code_ipa0(0xB2, 0x37, "SAL"), \ + exit_code_ipa0(0xB2, 0x38, "RSCH"), \ + exit_code_ipa0(0xB2, 0x39, "STCRW"), \ + exit_code_ipa0(0xB2, 0x3a, "STCPS"), \ + exit_code_ipa0(0xB2, 0x3b, "RCHP"), \ + exit_code_ipa0(0xB2, 0x3c, "SCHM"), \ + exit_code_ipa0(0xB2, 0x40, "BAKR"), \ + exit_code_ipa0(0xB2, 0x48, "PALB"), \ + exit_code_ipa0(0xB2, 0x4c, "TAR"), \ + exit_code_ipa0(0xB2, 0x50, "CSP"), \ + exit_code_ipa0(0xB2, 0x54, "MVPG"), \ + exit_code_ipa0(0xB2, 0x56, "STHYI"), \ + exit_code_ipa0(0xB2, 0x58, "BSG"), \ + exit_code_ipa0(0xB2, 0x5a, "BSA"), \ + exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ + exit_code_ipa0(0xB2, 0x74, "SIGA"), \ + exit_code_ipa0(0xB2, 0x76, "XSCH"), \ + exit_code_ipa0(0xB2, 0x78, "STCKE"), \ + exit_code_ipa0(0xB2, 0x7c, "STCKF"), \ + exit_code_ipa0(0xB2, 0x7d, "STSI"), \ + exit_code_ipa0(0xB2, 0xb0, "STFLE"), \ + exit_code_ipa0(0xB2, 0xb1, "STFL"), \ + exit_code_ipa0(0xB2, 0xb2, "LPSWE"), \ + exit_code_ipa0(0xB2, 0xf8, "TEND"), \ + exit_code_ipa0(0xB2, 0xfc, "TABORT"), \ + exit_code_ipa0(0xB9, 0x1e, "KMAC"), \ + exit_code_ipa0(0xB9, 0x28, "PCKMO"), \ + exit_code_ipa0(0xB9, 0x2a, "KMF"), \ + exit_code_ipa0(0xB9, 0x2b, "KMO"), \ + exit_code_ipa0(0xB9, 0x2d, 
"KMCTR"), \ + exit_code_ipa0(0xB9, 0x2e, "KM"), \ + exit_code_ipa0(0xB9, 0x2f, "KMC"), \ + exit_code_ipa0(0xB9, 0x3e, "KIMD"), \ + exit_code_ipa0(0xB9, 0x3f, "KLMD"), \ + exit_code_ipa0(0xB9, 0x8a, "CSPG"), \ + exit_code_ipa0(0xB9, 0x8d, "EPSW"), \ + exit_code_ipa0(0xB9, 0x8e, "IDTE"), \ + exit_code_ipa0(0xB9, 0x8f, "CRDTE"), \ + exit_code_ipa0(0xB9, 0x9c, "EQBS"), \ + exit_code_ipa0(0xB9, 0xa2, "PTF"), \ + exit_code_ipa0(0xB9, 0xab, "ESSA"), \ + exit_code_ipa0(0xB9, 0xae, "RRBM"), \ + exit_code_ipa0(0xB9, 0xaf, "PFMF"), \ + exit_code_ipa0(0xE3, 0x03, "LRAG"), \ + exit_code_ipa0(0xE3, 0x13, "LRAY"), \ + exit_code_ipa0(0xE3, 0x25, "NTSTG"), \ + exit_code_ipa0(0xE5, 0x00, "LASP"), \ + exit_code_ipa0(0xE5, 0x01, "TPROT"), \ + exit_code_ipa0(0xE5, 0x60, "TBEGIN"), \ + exit_code_ipa0(0xE5, 0x61, "TBEGINC"), \ + exit_code_ipa0(0xEB, 0x25, "STCTG"), \ + exit_code_ipa0(0xEB, 0x2f, "LCTLG"), \ + exit_code_ipa0(0xEB, 0x60, "LRIC"), \ + exit_code_ipa0(0xEB, 0x61, "STRIC"), \ + exit_code_ipa0(0xEB, 0x62, "MRIC"), \ + exit_code_ipa0(0xEB, 0x8a, "SQBS"), \ + exit_code_ipa0(0xC8, 0x01, "ECTG"), \ + exit_code(0x0a, "SVC"), \ + exit_code(0x80, "SSM"), \ + exit_code(0x82, "LPSW"), \ + exit_code(0x83, "DIAG"), \ + exit_code(0xae, "SIGP"), \ + exit_code(0xac, "STNSM"), \ + exit_code(0xad, "STOSM"), \ + exit_code(0xb1, "LRA"), \ + exit_code(0xb6, "STCTL"), \ + exit_code(0xb7, "LCTL"), \ + exit_code(0xee, "PLO") + +#define sie_intercept_code \ + { 0x00, "Host interruption" }, \ + { 0x04, "Instruction" }, \ + { 0x08, "Program interruption" }, \ + { 0x0c, "Instruction and program interruption" }, \ + { 0x10, "External request" }, \ + { 0x14, "External interruption" }, \ + { 0x18, "I/O request" }, \ + { 0x1c, "Wait state" }, \ + { 0x20, "Validity" }, \ + { 0x28, "Stop request" }, \ + { 0x2c, "Operation exception" }, \ + { 0x38, "Partial-execution" }, \ + { 0x3c, "I/O interruption" }, \ + { 0x40, "I/O instruction" }, \ + { 0x48, "Timing subset" } + +/* + * This is the simple interceptable instructions decoder. + * + * It will be used as userspace interface and it can be used in places + * that does not allow to use general decoder functions, + * such as trace events declarations. + * + * Some userspace tools may want to parse this code + * and would be confused by switch(), if() and other statements, + * but they can understand conditional operator. + */ +#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask) \ + (insn >> 56) == (ipa0) ? \ + ((ipa0 << 8) | ((insn >> rshift) & mask)) : + +#define INSN_DECODE(insn) (insn >> 56) + +/* + * The macro icpt_insn_decoder() takes an intercepted instruction + * and returns a key, which can be used to find a mnemonic name + * of the instruction in the icpt_insn_codes table. + */ +#define icpt_insn_decoder(insn) ( \ + INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ + INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xb9, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xe3, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ + INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ + INSN_DECODE(insn)) + +#endif /* _UAPI_ASM_S390_SIE_H */ diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..8b3503333 --- /dev/null +++ b/arch/s390/include/uapi/asm/sigcontext.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * Copyright IBM Corp. 
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..8b3503333 --- /dev/null +++ b/arch/s390/include/uapi/asm/sigcontext.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * Copyright IBM Corp. 1999, 2000 + */ + +#ifndef _ASM_S390_SIGCONTEXT_H +#define _ASM_S390_SIGCONTEXT_H + +#include <linux/compiler.h> +#include <linux/types.h> + +#define __NUM_GPRS 16 +#define __NUM_FPRS 16 +#define __NUM_ACRS 16 +#define __NUM_VXRS 32 +#define __NUM_VXRS_LOW 16 +#define __NUM_VXRS_HIGH 16 + +#ifndef __s390x__ + +/* Has to be at least _NSIG_WORDS from asm/signal.h */ +#define _SIGCONTEXT_NSIG 64 +#define _SIGCONTEXT_NSIG_BPW 32 +/* Size of stack frame allocated when calling signal handler. */ +#define __SIGNAL_FRAMESIZE 96 + +#else /* __s390x__ */ + +/* Has to be at least _NSIG_WORDS from asm/signal.h */ +#define _SIGCONTEXT_NSIG 64 +#define _SIGCONTEXT_NSIG_BPW 64 +/* Size of stack frame allocated when calling signal handler. */ +#define __SIGNAL_FRAMESIZE 160 + +#endif /* __s390x__ */ + +#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) +#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) + +typedef struct +{ + unsigned long mask; + unsigned long addr; +} __attribute__ ((aligned(8))) _psw_t; + +typedef struct +{ + _psw_t psw; + unsigned long gprs[__NUM_GPRS]; + unsigned int acrs[__NUM_ACRS]; +} _s390_regs_common; + +typedef struct +{ + unsigned int fpc; + unsigned int pad; + double fprs[__NUM_FPRS]; +} _s390_fp_regs; + +typedef struct +{ + _s390_regs_common regs; + _s390_fp_regs fpregs; +} _sigregs; + +typedef struct +{ +#ifndef __s390x__ + unsigned long gprs_high[__NUM_GPRS]; +#endif + unsigned long long vxrs_low[__NUM_VXRS_LOW]; + __vector128 vxrs_high[__NUM_VXRS_HIGH]; + unsigned char __reserved[128]; +} _sigregs_ext; + +struct sigcontext +{ + unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS]; + _sigregs __user *sregs; +}; + + +#endif + diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h new file mode 100644 index 000000000..e74d6ba1b --- /dev/null +++ b/arch/s390/include/uapi/asm/signal.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/signal.h" + */ + +#ifndef _UAPI_ASMS390_SIGNAL_H +#define _UAPI_ASMS390_SIGNAL_H + +#include <linux/types.h> +#include <linux/time.h> + +/* Avoid too many header ordering problems. */ +struct siginfo; +struct pt_regs; + +#ifndef __KERNEL__ +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +#define SA_RESTORER 0x04000000 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#include <asm-generic/signal-defs.h> + +#ifndef __KERNEL__ + +/* + * There are two system calls in regard to sigaction, sys_rt_sigaction + * and sys_sigaction.
Internally the kernel uses the struct old_sigaction + * for the older sys_sigaction system call, and the kernel version of the + * struct sigaction for the newer sys_rt_sigaction. + * + * The uapi definition for struct sigaction has made a strange distinction + * between 31-bit and 64-bit in the past. For 64-bit the uapi structure + * looks like the kernel struct sigaction, but for 31-bit it used to + * look like the kernel struct old_sigaction. That practically made the + * structure unusable for either system call. To get around this problem + * glibc has always had its own definitions for the sigaction structures. + * + * The current struct sigaction uapi definition below is suitable for the + * sys_rt_sigaction system call only. + */ +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + unsigned long sa_flags; + void (*sa_restorer)(void); + sigset_t sa_mask; +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + __kernel_size_t ss_size; +} stack_t; + + +#endif /* _UAPI_ASMS390_SIGNAL_H */ diff --git a/arch/s390/include/uapi/asm/stat.h b/arch/s390/include/uapi/asm/stat.h new file mode 100644 index 000000000..ac253d236 --- /dev/null +++ b/arch/s390/include/uapi/asm/stat.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/stat.h" + */ + +#ifndef _S390_STAT_H +#define _S390_STAT_H + +#ifndef __s390x__ +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned long long st_dev; + unsigned int __pad1; +#define STAT64_HAS_BROKEN_ST_INO 1 + unsigned long __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned long st_uid; + unsigned long st_gid; + unsigned long long st_rdev; + unsigned int __pad3; + long long st_size; + unsigned long st_blksize; + unsigned char __pad4[4]; + unsigned long __pad5; /* future possible st_blocks high bits */ + unsigned long st_blocks; /* Number of 512-byte blocks allocated.
*/ + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ + unsigned long long st_ino; +}; + +#else /* __s390x__ */ + +struct stat { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad1; + unsigned long st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long st_blksize; + long st_blocks; + unsigned long __unused[3]; +}; + +#endif /* __s390x__ */ + +#define STAT_HAVE_NSEC 1 + +#endif diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h new file mode 100644 index 000000000..f85b50723 --- /dev/null +++ b/arch/s390/include/uapi/asm/statfs.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/statfs.h" + */ + +#ifndef _S390_STATFS_H +#define _S390_STATFS_H + +/* + * We can't use <asm-generic/statfs.h> because in 64-bit mode + * we mix ints of different sizes in our struct statfs. + */ + +#ifndef __KERNEL_STRICT_NAMES +#include <linux/types.h> +typedef __kernel_fsid_t fsid_t; +#endif + +struct statfs { + unsigned int f_type; + unsigned int f_bsize; + unsigned long f_blocks; + unsigned long f_bfree; + unsigned long f_bavail; + unsigned long f_files; + unsigned long f_ffree; + __kernel_fsid_t f_fsid; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[5]; +}; + +struct statfs64 { + unsigned int f_type; + unsigned int f_bsize; + unsigned long long f_blocks; + unsigned long long f_bfree; + unsigned long long f_bavail; + unsigned long long f_files; + unsigned long long f_ffree; + __kernel_fsid_t f_fsid; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[5]; +}; + +#endif diff --git a/arch/s390/include/uapi/asm/sthyi.h b/arch/s390/include/uapi/asm/sthyi.h new file mode 100644 index 000000000..b1b022316 --- /dev/null +++ b/arch/s390/include/uapi/asm/sthyi.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_STHYI_H +#define _UAPI_ASM_STHYI_H + +#define STHYI_FC_CP_IFL_CAP 0 + +#endif /* _UAPI_ASM_STHYI_H */ diff --git a/arch/s390/include/uapi/asm/tape390.h b/arch/s390/include/uapi/asm/tape390.h new file mode 100644 index 000000000..90266c696 --- /dev/null +++ b/arch/s390/include/uapi/asm/tape390.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/************************************************************************* + * + * enables user programs to display messages and control encryption + * on s390 tape devices + * + * Copyright IBM Corp.
2001, 2006 + * Author(s): Michael Holzheu + * + *************************************************************************/ + +#ifndef _TAPE390_H +#define _TAPE390_H + +#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct) + +/* + * The TAPE390_DISPLAY ioctl calls the Load Display command + * which transfers 17 bytes of data from the channel to the subsystem: + * - 1 format control byte, and + * - two 8-byte messages + * + * Format control byte: + * 0-2: New Message Overlay + * 3: Alternate Messages + * 4: Blink Message + * 5: Display Low/High Message + * 6: Reserved + * 7: Automatic Load Request + * + */ + +typedef struct display_struct { + char cntrl; + char message1[8]; + char message2[8]; +} display_struct; + +/* + * Tape encryption support + */ + +struct tape390_crypt_info { + char capability; + char status; + char medium_status; +} __attribute__ ((packed)); + + +/* Macros for "capability" field */ +#define TAPE390_CRYPT_SUPPORTED_MASK 0x01 +#define TAPE390_CRYPT_SUPPORTED(x) \ + ((x.capability & TAPE390_CRYPT_SUPPORTED_MASK)) + +/* Macros for "status" field */ +#define TAPE390_CRYPT_ON_MASK 0x01 +#define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK)) + +/* Macros for "medium status" field */ +#define TAPE390_MEDIUM_LOADED_MASK 0x01 +#define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02 +#define TAPE390_MEDIUM_ENCRYPTED(x) \ + (((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK)) +#define TAPE390_MEDIUM_LOADED(x) \ + (((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK)) + +/* + * The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption. + * The "capability" and "medium_status" fields are ignored for this ioctl! + */ +#define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info) + +/* + * The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state. + */ +#define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info) + +/* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */ +#define TAPE390_KEKL_TYPE_NONE 0 +#define TAPE390_KEKL_TYPE_LABEL 1 +#define TAPE390_KEKL_TYPE_HASH 2 + +struct tape390_kekl { + unsigned char type; + unsigned char type_on_tape; + char label[65]; +} __attribute__ ((packed)); + +struct tape390_kekl_pair { + struct tape390_kekl kekl[2]; +} __attribute__ ((packed)); + +/* + * The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels. + */ +#define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair) + +/* + * The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels.
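+ * + * For illustration, a minimal userspace sketch of querying the key labels (not part of the original header; the device node /dev/ntibm0, an already opened fd, and the usual libc/ioctl includes are assumptions): + * + * struct tape390_kekl_pair pair; + * + * memset(&pair, 0, sizeof(pair)); + * if (ioctl(fd, TAPE390_KEKL_QUERY, &pair) == 0) + * printf("KEKL1: type=%u label=%.64s\n", pair.kekl[0].type, pair.kekl[0].label);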
+ */ +#define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair) + +#endif diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h new file mode 100644 index 000000000..84457dbb2 --- /dev/null +++ b/arch/s390/include/uapi/asm/types.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/types.h" + */ + +#ifndef _UAPI_S390_TYPES_H +#define _UAPI_S390_TYPES_H + +#include <asm-generic/int-ll64.h> + +#ifndef __ASSEMBLY__ + +typedef unsigned long addr_t; +typedef __signed__ long saddr_t; + +typedef struct { + union { + struct { + __u64 high; + __u64 low; + }; + __u32 u[4]; + }; +} __attribute__((packed, aligned(4))) __vector128; + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_S390_TYPES_H */ diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h new file mode 100644 index 000000000..c95f42e85 --- /dev/null +++ b/arch/s390/include/uapi/asm/ucontext.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/ucontext.h" + */ + +#ifndef _ASM_S390_UCONTEXT_H +#define _ASM_S390_UCONTEXT_H + +#define UC_GPRS_HIGH 1 /* uc_mcontext_ext has valid high gprs */ +#define UC_VXRS 2 /* uc_mcontext_ext has valid vector regs */ + +/* + * The struct ucontext_extended describes how the registers are stored + * on an rt signal frame. Please note that the structure is not fixed; + * if new CPU registers are added to the user state, the size of the + * struct ucontext_extended will increase. + */ +struct ucontext_extended { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + _sigregs uc_mcontext; + sigset_t uc_sigmask; + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ + unsigned char __unused[128 - sizeof(sigset_t)]; + _sigregs_ext uc_mcontext_ext; +}; + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + _sigregs uc_mcontext; + sigset_t uc_sigmask; + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ + unsigned char __unused[128 - sizeof(sigset_t)]; +}; + +#endif /* !_ASM_S390_UCONTEXT_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h new file mode 100644 index 000000000..01b5fe8b9 --- /dev/null +++ b/arch/s390/include/uapi/asm/unistd.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * S390 version + * + * Derived from "include/asm-i386/unistd.h" + */ + +#ifndef _UAPI_ASM_S390_UNISTD_H_ +#define _UAPI_ASM_S390_UNISTD_H_ + +#ifdef __s390x__ +#include <asm/unistd_64.h> +#else +#include <asm/unistd_32.h> +#endif + +#endif /* _UAPI_ASM_S390_UNISTD_H_ */ diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h new file mode 100644 index 000000000..b9c2f14a6 --- /dev/null +++ b/arch/s390/include/uapi/asm/uvdevice.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright IBM Corp.
2022 + * Author(s): Steffen Eiden + */ +#ifndef __S390_ASM_UVDEVICE_H +#define __S390_ASM_UVDEVICE_H + +#include <linux/types.h> + +struct uvio_ioctl_cb { + __u32 flags; + __u16 uv_rc; /* UV header rc value */ + __u16 uv_rrc; /* UV header rrc value */ + __u64 argument_addr; /* Userspace address of uvio argument */ + __u32 argument_len; + __u8 reserved14[0x40 - 0x14]; /* must be zero */ +}; + +#define UVIO_ATT_USER_DATA_LEN 0x100 +#define UVIO_ATT_UID_LEN 0x10 +struct uvio_attest { + __u64 arcb_addr; /* 0x0000 */ + __u64 meas_addr; /* 0x0008 */ + __u64 add_data_addr; /* 0x0010 */ + __u8 user_data[UVIO_ATT_USER_DATA_LEN]; /* 0x0018 */ + __u8 config_uid[UVIO_ATT_UID_LEN]; /* 0x0118 */ + __u32 arcb_len; /* 0x0128 */ + __u32 meas_len; /* 0x012c */ + __u32 add_data_len; /* 0x0130 */ + __u16 user_data_len; /* 0x0134 */ + __u16 reserved136; /* 0x0136 */ +}; + +/** + * uvio_uvdev_info - Information about supported functions + * @supp_uvio_cmds - IOCTLs supported by this device + * @supp_uv_cmds - supported UV calls corresponding to the IOCTLs + * + * UVIO request to get information about the request types supported by this + * uvdevice and the Ultravisor. Everything is output. Bits are in LSB0 + * ordering. If the bit is set in both @supp_uvio_cmds and @supp_uv_cmds, the + * uvdevice and the Ultravisor support that call. + * + * Note that bit 0 (UVIO_IOCTL_UVDEV_INFO_NR) is always zero for `supp_uv_cmds` + * as there is no corresponding UV-call. + */ +struct uvio_uvdev_info { + /* + * If bit `n` is set, this device supports the IOCTL with nr `n`. + */ + __u64 supp_uvio_cmds; + /* + * If bit `n` is set, the Ultravisor (UV) supports the UV-call + * corresponding to the IOCTL with nr `n` in the calling context (host + * or guest). The value is only valid if the corresponding bit in + * @supp_uvio_cmds is set as well. + */ + __u64 supp_uv_cmds; +}; + +/* + * The following max values define an upper bound on the length of the IOCTL + * in/out buffers. However, they do not represent the maximum the Ultravisor + * allows, which is often much smaller. By allowing larger buffer sizes we + * hopefully do not need to update the code with every machine update. It is + * therefore possible for userspace to request more memory than actually used + * by kernel/UV.
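+ * + * For illustration, a minimal sketch probing for the attestation IOCTL through UVIO_IOCTL_UVDEV_INFO, defined below (not part of the original header; the device node /dev/uv, derived from UVIO_DEVICE_NAME, and an already opened fd are assumptions): + * + * struct uvio_uvdev_info info; + * struct uvio_ioctl_cb cb = { + * .argument_addr = (__u64)(unsigned long)&info, + * .argument_len = sizeof(info), + * }; + * + * if (ioctl(fd, UVIO_IOCTL_UVDEV_INFO, &cb) == 0 && + * (info.supp_uvio_cmds & (1ULL << UVIO_IOCTL_ATT_NR))) + * puts("attestation is supported");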
+ */ +#define UVIO_ATT_ARCB_MAX_LEN 0x100000 +#define UVIO_ATT_MEASUREMENT_MAX_LEN 0x8000 +#define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000 +#define UVIO_ADD_SECRET_MAX_LEN 0x100000 +#define UVIO_LIST_SECRETS_LEN 0x1000 + +#define UVIO_DEVICE_NAME "uv" +#define UVIO_TYPE_UVC 'u' + +enum UVIO_IOCTL_NR { + UVIO_IOCTL_UVDEV_INFO_NR = 0x00, + UVIO_IOCTL_ATT_NR, + UVIO_IOCTL_ADD_SECRET_NR, + UVIO_IOCTL_LIST_SECRETS_NR, + UVIO_IOCTL_LOCK_SECRETS_NR, + /* must be the last entry */ + UVIO_IOCTL_NUM_IOCTLS +}; + +#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb) +#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR) +#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR) +#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR) +#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR) +#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR) + +#define UVIO_SUPP_CALL(nr) (1ULL << (nr)) +#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UVDEV_INFO_NR) +#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR) +#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR) +#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR) +#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR) + +#endif /* __S390_ASM_UVDEVICE_H */ diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h new file mode 100644 index 000000000..2b605f7e8 --- /dev/null +++ b/arch/s390/include/uapi/asm/virtio-ccw.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Definitions for virtio-ccw devices. + * + * Copyright IBM Corp. 2013 + * + * Author(s): Cornelia Huck + */ +#ifndef __KVM_VIRTIO_CCW_H +#define __KVM_VIRTIO_CCW_H + +/* Alignment of vring buffers. */ +#define KVM_VIRTIO_CCW_RING_ALIGN 4096 + +/* Subcode for diagnose 500 (virtio hypercall). */ +#define KVM_S390_VIRTIO_CCW_NOTIFY 3 + +#endif diff --git a/arch/s390/include/uapi/asm/vmcp.h b/arch/s390/include/uapi/asm/vmcp.h new file mode 100644 index 000000000..aeaaa0300 --- /dev/null +++ b/arch/s390/include/uapi/asm/vmcp.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright IBM Corp. 2004, 2005 + * Interface implementation for communication with the z/VM control program + * Version 1.0 + * Author(s): Christian Borntraeger + * + * + * z/VM's CP offers the possibility to issue commands via the diagnose code 8; + * this driver implements a character device that issues these commands and + * returns the answer of CP. + * + * The idea of this driver is based on cpint from Neale Ferguson + */ + +#ifndef _UAPI_ASM_VMCP_H +#define _UAPI_ASM_VMCP_H + +#include <asm/ioctl.h> + +#define VMCP_GETCODE _IOR(0x10, 1, int) +#define VMCP_SETBUF _IOW(0x10, 2, int) +#define VMCP_GETSIZE _IOR(0x10, 3, int) + +#endif /* _UAPI_ASM_VMCP_H */ diff --git a/arch/s390/include/uapi/asm/vtoc.h b/arch/s390/include/uapi/asm/vtoc.h new file mode 100644 index 000000000..50c1d7b9e --- /dev/null +++ b/arch/s390/include/uapi/asm/vtoc.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * This file contains volume label definitions for DASD devices. + * + * Copyright IBM Corp.
2005 + * + * Author(s): Volker Sameske + * + */ + +#ifndef _ASM_S390_VTOC_H +#define _ASM_S390_VTOC_H + +#include <linux/types.h> + +struct vtoc_ttr +{ + __u16 tt; + __u8 r; +} __attribute__ ((packed)); + +struct vtoc_cchhb +{ + __u16 cc; + __u16 hh; + __u8 b; +} __attribute__ ((packed)); + +struct vtoc_cchh +{ + __u16 cc; + __u16 hh; +} __attribute__ ((packed)); + +struct vtoc_labeldate +{ + __u8 year; + __u16 day; +} __attribute__ ((packed)); + +struct vtoc_volume_label_cdl +{ + char volkey[4]; /* volume key = volume label */ + char vollbl[4]; /* volume label */ + char volid[6]; /* volume identifier */ + __u8 security; /* security byte */ + struct vtoc_cchhb vtoc; /* VTOC address */ + char res1[5]; /* reserved */ + char cisize[4]; /* CI-size for FBA,... */ + /* ...blanks for CKD */ + char blkperci[4]; /* no of blocks per CI (FBA), blanks for CKD */ + char labperci[4]; /* no of labels per CI (FBA), blanks for CKD */ + char res2[4]; /* reserved */ + char lvtoc[14]; /* owner code for LVTOC */ + char res3[29]; /* reserved */ +} __attribute__ ((packed)); + +struct vtoc_volume_label_ldl { + char vollbl[4]; /* volume label */ + char volid[6]; /* volume identifier */ + char res3[69]; /* reserved */ + char ldl_version; /* version number, valid for ldl format */ + __u64 formatted_blocks; /* valid when ldl_version >= f2 */ +} __attribute__ ((packed)); + +struct vtoc_extent +{ + __u8 typeind; /* extent type indicator */ + __u8 seqno; /* extent sequence number */ + struct vtoc_cchh llimit; /* starting point of this extent */ + struct vtoc_cchh ulimit; /* ending point of this extent */ +} __attribute__ ((packed)); + +struct vtoc_dev_const +{ + __u16 DS4DSCYL; /* number of logical cyls */ + __u16 DS4DSTRK; /* number of tracks in a logical cylinder */ + __u16 DS4DEVTK; /* device track length */ + __u8 DS4DEVI; /* non-last keyed record overhead */ + __u8 DS4DEVL; /* last keyed record overhead */ + __u8 DS4DEVK; /* non-keyed record overhead differential */ + __u8 DS4DEVFG; /* flag byte */ + __u16 DS4DEVTL; /* device tolerance */ + __u8 DS4DEVDT; /* number of DSCB's per track */ + __u8 DS4DEVDB; /* number of directory blocks per track */ +} __attribute__ ((packed)); + +struct vtoc_format1_label +{ + char DS1DSNAM[44]; /* data set name */ + __u8 DS1FMTID; /* format identifier */ + char DS1DSSN[6]; /* data set serial number */ + __u16 DS1VOLSQ; /* volume sequence number */ + struct vtoc_labeldate DS1CREDT; /* creation date: ydd */ + struct vtoc_labeldate DS1EXPDT; /* expiration date */ + __u8 DS1NOEPV; /* number of extents on volume */ + __u8 DS1NOBDB; /* no. of bytes used in last directory blk */ + __u8 DS1FLAG1; /* flag 1 */ + char DS1SYSCD[13]; /* system code */ + struct vtoc_labeldate DS1REFD; /* date last referenced */ + __u8 DS1SMSFG; /* system managed storage indicators */ + __u8 DS1SCXTF; /* sec.
space extension flag byte */ + __u16 DS1SCXTV; /* secondary space extension value */ + __u8 DS1DSRG1; /* data set organisation byte 1 */ + __u8 DS1DSRG2; /* data set organisation byte 2 */ + __u8 DS1RECFM; /* record format */ + __u8 DS1OPTCD; /* option code */ + __u16 DS1BLKL; /* block length */ + __u16 DS1LRECL; /* record length */ + __u8 DS1KEYL; /* key length */ + __u16 DS1RKP; /* relative key position */ + __u8 DS1DSIND; /* data set indicators */ + __u8 DS1SCAL1; /* secondary allocation flag byte */ + char DS1SCAL3[3]; /* secondary allocation quantity */ + struct vtoc_ttr DS1LSTAR; /* last used track and block on track */ + __u16 DS1TRBAL; /* space remaining on last used track */ + __u16 res1; /* reserved */ + struct vtoc_extent DS1EXT1; /* first extent description */ + struct vtoc_extent DS1EXT2; /* second extent description */ + struct vtoc_extent DS1EXT3; /* third extent description */ + struct vtoc_cchhb DS1PTRDS; /* possible pointer to f2 or f3 DSCB */ +} __attribute__ ((packed)); + +struct vtoc_format4_label +{ + char DS4KEYCD[44]; /* key code for VTOC labels: 44 times 0x04 */ + __u8 DS4IDFMT; /* format identifier */ + struct vtoc_cchhb DS4HPCHR; /* highest address of a format 1 DSCB */ + __u16 DS4DSREC; /* number of available DSCB's */ + struct vtoc_cchh DS4HCCHH; /* CCHH of next available alternate track */ + __u16 DS4NOATK; /* number of remaining alternate tracks */ + __u8 DS4VTOCI; /* VTOC indicators */ + __u8 DS4NOEXT; /* number of extents in VTOC */ + __u8 DS4SMSFG; /* system managed storage indicators */ + __u8 DS4DEVAC; /* number of alternate cylinders. + * Subtract from first two bytes of + * DS4DEVSZ to get number of usable + * cylinders. can be zero. valid + * only if DS4DEVAV on. */ + struct vtoc_dev_const DS4DEVCT; /* device constants */ + char DS4AMTIM[8]; /* VSAM time stamp */ + char DS4AMCAT[3]; /* VSAM catalog indicator */ + char DS4R2TIM[8]; /* VSAM volume/catalog match time stamp */ + char res1[5]; /* reserved */ + char DS4F6PTR[5]; /* pointer to first format 6 DSCB */ + struct vtoc_extent DS4VTOCE; /* VTOC extent description */ + char res2[10]; /* reserved */ + __u8 DS4EFLVL; /* extended free-space management level */ + struct vtoc_cchhb DS4EFPTR; /* pointer to extended free-space info */ + char res3; /* reserved */ + __u32 DS4DCYL; /* number of logical cyls */ + char res4[2]; /* reserved */ + __u8 DS4DEVF2; /* device flags */ + char res5; /* reserved */ +} __attribute__ ((packed)); + +struct vtoc_ds5ext +{ + __u16 t; /* RTA of the first track of free extent */ + __u16 fc; /* number of whole cylinders in free ext. */ + __u8 ft; /* number of remaining free tracks */ +} __attribute__ ((packed)); + +struct vtoc_format5_label +{ + char DS5KEYID[4]; /* key identifier */ + struct vtoc_ds5ext DS5AVEXT; /* first available (free-space) extent. 
*/ + struct vtoc_ds5ext DS5EXTAV[7]; /* seven available extents */ + __u8 DS5FMTID; /* format identifier */ + struct vtoc_ds5ext DS5MAVET[18]; /* eighteen available extents */ + struct vtoc_cchhb DS5PTRDS; /* pointer to next format5 DSCB */ +} __attribute__ ((packed)); + +struct vtoc_ds7ext +{ + __u32 a; /* starting RTA value */ + __u32 b; /* ending RTA value + 1 */ +} __attribute__ ((packed)); + +struct vtoc_format7_label +{ + char DS7KEYID[4]; /* key identifier */ + struct vtoc_ds7ext DS7EXTNT[5]; /* space for 5 extent descriptions */ + __u8 DS7FMTID; /* format identifier */ + struct vtoc_ds7ext DS7ADEXT[11]; /* space for 11 extent descriptions */ + char res1[2]; /* reserved */ + struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */ +} __attribute__ ((packed)); + +struct vtoc_cms_label { + __u8 label_id[4]; /* Label identifier */ + __u8 vol_id[6]; /* Volid */ + __u16 version_id; /* Version identifier */ + __u32 block_size; /* Disk block size */ + __u32 origin_ptr; /* Disk origin pointer */ + __u32 usable_count; /* Number of usable cylinders/blocks */ + __u32 formatted_count; /* Maximum number of formatted cylinders/ + * blocks */ + __u32 block_count; /* Disk size in CMS blocks */ + __u32 used_count; /* Number of CMS blocks in use */ + __u32 fst_size; /* File Status Table (FST) size */ + __u32 fst_count; /* Number of FSTs per CMS block */ + __u8 format_date[6]; /* Disk FORMAT date */ + __u8 reserved1[2]; + __u32 disk_offset; /* Disk offset when reserved */ + __u32 map_block; /* Allocation Map Block with next hole */ + __u32 hblk_disp; /* Displacement into HBLK data of next hole */ + __u32 user_disp; /* Displacement into user part of Allocation + * map */ + __u8 reserved2[4]; + __u8 segment_name[8]; /* Name of shared segment */ +} __attribute__ ((packed)); + +#endif /* _ASM_S390_VTOC_H */ diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h new file mode 100644 index 000000000..f4785abe1 --- /dev/null +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * include/asm-s390/zcrypt.h + * + * zcrypt 2.2.1 (user-visible header) + * + * Copyright IBM Corp. 2001, 2022 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + */ + +#ifndef __ASM_S390_ZCRYPT_H +#define __ASM_S390_ZCRYPT_H + +#define ZCRYPT_VERSION 2 +#define ZCRYPT_RELEASE 2 +#define ZCRYPT_VARIANT 1 + +#include <linux/ioctl.h> +#include <linux/compiler.h> +#include <linux/types.h> + +/* Name of the zcrypt device driver. */ +#define ZCRYPT_NAME "zcrypt" + +/** + * struct ica_rsa_modexpo + * + * Requirements: + * - outputdatalength is at least as large as inputdatalength. + * - All key parts are right justified in their fields, padded on + * the left with zeroes. + * - length(b_key) = inputdatalength + * - length(n_modulus) = inputdatalength + */ +struct ica_rsa_modexpo { + __u8 __user *inputdata; + __u32 inputdatalength; + __u8 __user *outputdata; + __u32 outputdatalength; + __u8 __user *b_key; + __u8 __user *n_modulus; +}; + +/** + * struct ica_rsa_modexpo_crt + * + * Requirements: + * - inputdatalength is even. + * - outputdatalength is at least as large as inputdatalength. + * - All key parts are right justified in their fields, padded on + * the left with zeroes.
+ - length(bp_key) = inputdatalength/2 + 8 + * - length(bq_key) = inputdatalength/2 + * - length(np_prime) = inputdatalength/2 + 8 + * - length(nq_prime) = inputdatalength/2 + * - length(u_mult_inv) = inputdatalength/2 + 8 + */ +struct ica_rsa_modexpo_crt { + __u8 __user *inputdata; + __u32 inputdatalength; + __u8 __user *outputdata; + __u32 outputdatalength; + __u8 __user *bp_key; + __u8 __user *bq_key; + __u8 __user *np_prime; + __u8 __user *nq_prime; + __u8 __user *u_mult_inv; +}; + +/** + * CPRBX + * Note that all shorts and ints are big-endian. + * All pointer fields are 16 bytes long, and mean nothing. + * + * A request CPRB is followed by a request_parameter_block. + * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct CPRBX { + __u16 cprb_len; /* CPRB length 220 */ + __u8 cprb_ver_id; /* CPRB version id. 0x02 */ + __u8 ctfm; /* Command Type Filtering Mask */ + __u8 pad_000[2]; /* Alignment pad bytes */ + __u8 func_id[2]; /* function id 0x5432 */ + __u8 cprb_flags[4]; /* Flags */ + __u32 req_parml; /* request parameter buffer len */ + __u32 req_datal; /* request data buffer len */ + __u32 rpl_msgbl; /* reply message block length */ + __u32 rpld_parml; /* replied parameter block len */ + __u32 rpl_datal; /* reply data block len */ + __u32 rpld_datal; /* replied data block len */ + __u32 req_extbl; /* request extension block len */ + __u8 _pad_001[4]; /* reserved */ + __u32 rpld_extbl; /* replied extension block len */ + __u8 _pad_002[16 - sizeof(__u8 *)]; + __u8 __user *req_parmb; /* request parm block 'address' */ + __u8 _pad_003[16 - sizeof(__u8 *)]; + __u8 __user *req_datab; /* request data block 'address' */ + __u8 _pad_004[16 - sizeof(__u8 *)]; + __u8 __user *rpl_parmb; /* reply parm block 'address' */ + __u8 _pad_005[16 - sizeof(__u8 *)]; + __u8 __user *rpl_datab; /* reply data block 'address' */ + __u8 _pad_006[16 - sizeof(__u8 *)]; + __u8 __user *req_extb; /* request extension block 'addr'*/ + __u8 _pad_007[16 - sizeof(__u8 *)]; + __u8 __user *rpl_extb; /* reply extension block 'address'*/ + __u16 ccp_rtcode; /* server return code */ + __u16 ccp_rscode; /* server reason code */ + __u32 mac_data_len; /* Mac Data Length */ + __u8 logon_id[8]; /* Logon Identifier */ + __u8 mac_value[8]; /* Mac Value */ + __u8 mac_content_flgs; /* Mac content flag byte */ + __u8 _pad_008; /* Alignment */ + __u16 domain; /* Domain */ + __u8 _pad_009[12]; /* reserved, checked for zeros */ + __u8 _pad_010[36]; /* reserved */ +} __attribute__((packed)); + +/** + * xcRB + */ +struct ica_xcRB { + __u16 agent_ID; + __u32 user_defined; + __u16 request_ID; + __u32 request_control_blk_length; + __u8 _padding1[16 - sizeof(__u8 *)]; + __u8 __user *request_control_blk_addr; + __u32 request_data_length; + __u8 _padding2[16 - sizeof(__u8 *)]; + __u8 __user *request_data_address; + __u32 reply_control_blk_length; + __u8 _padding3[16 - sizeof(__u8 *)]; + __u8 __user *reply_control_blk_addr; + __u32 reply_data_length; + __u8 __padding4[16 - sizeof(__u8 *)]; + __u8 __user *reply_data_addr; + __u16 priority_window; + __u32 status; +} __attribute__((packed)); + +/** + * struct ep11_cprb - EP11 connectivity programming request block + * @cprb_len: CPRB header length [0x0020] + * @cprb_ver_id: CPRB version id.
[0x04] + * @pad_000: Alignment pad bytes + * @flags: Admin bit [0x80], Special bit [0x20] + * @func_id: Function id / subtype [0x5434] "T4" + * @source_id: Source id [originator id] + * @target_id: Target id [usage/ctrl domain id] + * @ret_code: Return code + * @reserved1: Reserved + * @reserved2: Reserved + * @payload_len: Payload length + */ +struct ep11_cprb { + __u16 cprb_len; + __u8 cprb_ver_id; + __u8 pad_000[2]; + __u8 flags; + __u8 func_id[2]; + __u32 source_id; + __u32 target_id; + __u32 ret_code; + __u32 reserved1; + __u32 reserved2; + __u32 payload_len; +} __attribute__((packed)); + +/** + * struct ep11_target_dev - EP11 target device list + * @ap_id: AP device id + * @dom_id: Usage domain id + */ +struct ep11_target_dev { + __u16 ap_id; + __u16 dom_id; +}; + +/** + * struct ep11_urb - EP11 user request block + * @targets_num: Number of target adapters + * @targets: Addr of target adapter list + * @weight: Level of request priority + * @req_no: Request id/number + * @req_len: Request length + * @req: Addr of request block + * @resp_len: Response length + * @resp: Addr of response block + */ +struct ep11_urb { + __u16 targets_num; + __u8 __user *targets; + __u64 weight; + __u64 req_no; + __u64 req_len; + __u8 __user *req; + __u64 resp_len; + __u8 __user *resp; +} __attribute__((packed)); + +/** + * struct zcrypt_device_status_ext + * @hwtype: raw hardware type + * @qid: 8 bit device index, 8 bit domain + * @functions: AP device function bit field 'abcdef' + * a, b, c = reserved + * d = CCA coprocessor + * e = Accelerator + * f = EP11 coprocessor + * @online: online status + * @reserved: reserved + */ +struct zcrypt_device_status_ext { + unsigned int hwtype:8; + unsigned int qid:16; + unsigned int online:1; + unsigned int functions:6; + unsigned int reserved:1; +}; + +#define MAX_ZDEV_CARDIDS_EXT 256 +#define MAX_ZDEV_DOMAINS_EXT 256 + +/* Maximum number of zcrypt devices */ +#define MAX_ZDEV_ENTRIES_EXT (MAX_ZDEV_CARDIDS_EXT * MAX_ZDEV_DOMAINS_EXT) + +/* Device matrix of all zcrypt devices */ +struct zcrypt_device_matrix_ext { + struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT]; +}; + +#define AUTOSELECT 0xFFFFFFFF +#define AUTOSEL_AP ((__u16)0xFFFF) +#define AUTOSEL_DOM ((__u16)0xFFFF) + +#define ZCRYPT_IOCTL_MAGIC 'z' + +/** + * Interface notes: + * + * The ioctl()s which are implemented (along with relevant details) + * are: + * + * ICARSAMODEXPO + * Perform an RSA operation using a Modulus-Exponent pair + * This takes an ica_rsa_modexpo struct as its arg. + * + * NOTE: please refer to the comments preceding this structure + * for the implementation details of the contents of the + * block + * + * ICARSACRT + * Perform an RSA operation using a Chinese-Remainder Theorem key + * This takes an ica_rsa_modexpo_crt struct as its arg. + * + * NOTE: please refer to the comments preceding this structure + * for the implementation details of the contents of the + * block + * + * ZSECSENDCPRB + * Send an arbitrary CPRB to a crypto card. + * + * ZSENDEP11CPRB + * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. + * + * ZCRYPT_DEVICE_STATUS + * The given struct zcrypt_device_matrix_ext is updated with + * status information for each currently known apqn. + * + * ZCRYPT_STATUS_MASK + * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the + * status of all devices.
+ 0x01: PCICA + * 0x02: PCICC + * 0x03: PCIXCC_MCL2 + * 0x04: PCIXCC_MCL3 + * 0x05: CEX2C + * 0x06: CEX2A + * 0x07: CEX3C + * 0x08: CEX3A + * 0x0a: CEX4 + * 0x0b: CEX5 + * 0x0c: CEX6, CEX7 or CEX8 + * 0x0d: device is disabled + * + * ZCRYPT_QDEPTH_MASK + * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the + * queue depth of all devices. + * + * ZCRYPT_PERDEV_REQCNT + * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned integers for + * the number of successfully completed requests per device since the + * device was detected and made available. + * + */ + +/** + * Supported ioctl calls + */ +#define ICARSAMODEXPO _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) +#define ICARSACRT _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) +#define ZSECSENDCPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) + +#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) +#define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) +#define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) +#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) + +/* + * Support for multiple zcrypt device nodes. + */ + +/* Nr of minor device node numbers to allocate. */ +#define ZCRYPT_MAX_MINOR_NODES 256 + +/* Maximum number of possible ioctls */ +#define MAX_ZDEV_IOCTLS (1 << _IOC_NRBITS) + +/* + * Only deprecated defines, structs and ioctls below this line. + */ + +/* Deprecated: use MAX_ZDEV_CARDIDS_EXT */ +#define MAX_ZDEV_CARDIDS 64 +/* Deprecated: use MAX_ZDEV_DOMAINS_EXT */ +#define MAX_ZDEV_DOMAINS 256 + +/* Deprecated: use MAX_ZDEV_ENTRIES_EXT */ +#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS) + +/* Deprecated: use struct zcrypt_device_status_ext */ +struct zcrypt_device_status { + unsigned int hwtype:8; + unsigned int qid:14; + unsigned int online:1; + unsigned int functions:6; + unsigned int reserved:3; +}; + +/* Deprecated: use struct zcrypt_device_matrix_ext */ +struct zcrypt_device_matrix { + struct zcrypt_device_status device[MAX_ZDEV_ENTRIES]; +}; + +/* Deprecated: use ZCRYPT_DEVICE_STATUS */ +#define ZDEVICESTATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) +/* Deprecated: use ZCRYPT_STATUS_MASK */ +#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) +/* Deprecated: use ZCRYPT_QDEPTH_MASK */ +#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64]) +/* Deprecated: use ZCRYPT_PERDEV_REQCNT */ +#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64]) + +/* Deprecated: use sysfs to query these values */ +#define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int) +#define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int) +#define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int) +#define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int) + +/* + * The ioctl number ranges 0x40 - 0x42 and 0x4b - 0x4e were used in the + * past; don't assign new ioctls to these ranges. + */ + +#endif /* __ASM_S390_ZCRYPT_H */ -- cgit v1.2.3
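For illustration of the zcrypt interface notes above: a minimal userspace sketch that queries the device matrix with ZCRYPT_DEVICE_STATUS. The device node path /dev/z90crypt and the error handling are assumptions, not taken from the patch.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/zcrypt.h>

	int main(void)
	{
		/* the matrix holds 256 * 256 status entries (256 KiB), so keep it off the stack */
		static struct zcrypt_device_matrix_ext matrix;
		int fd = open("/dev/z90crypt", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, ZCRYPT_DEVICE_STATUS, &matrix) == 0)
			printf("first entry: hwtype=%u online=%u\n",
			       matrix.device[0].hwtype, matrix.device[0].online);
		close(fd);
		return 0;
	}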