path: root/arch/s390/kernel/fpu.c
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit    34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree      62db60558cbf089714b48daeabca82bf2b20b20e /arch/s390/kernel/fpu.c
parent    Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/s390/kernel/fpu.c')
-rw-r--r--  arch/s390/kernel/fpu.c | 372
1 file changed, 151 insertions(+), 221 deletions(-)
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index a4f3449cc8..fa90bbdc5e 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -8,256 +8,186 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
-#include <asm/fpu/types.h>
-#include <asm/fpu/api.h>
-#include <asm/vx-insn.h>
+#include <asm/fpu.h>
-void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
+void __kernel_fpu_begin(struct kernel_fpu *state, int flags)
{
+ __vector128 *vxrs = state->vxrs;
+ int mask;
+
/*
* Limit the save to the FPU/vector registers already
- * in use by the previous context
+ * in use by the previous context.
*/
- flags &= state->mask;
-
+ flags &= state->hdr.mask;
if (flags & KERNEL_FPC)
- /* Save floating point control */
- asm volatile("stfpc %0" : "=Q" (state->fpc));
-
+ fpu_stfpc(&state->hdr.fpc);
if (!cpu_has_vx()) {
- if (flags & KERNEL_VXR_V0V7) {
- /* Save floating-point registers */
- asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
- asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
- asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
- asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
- asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
- asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
- asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
- asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
- asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
- asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
- asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
- asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
- asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
- asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
- asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
- asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
- }
+ if (flags & KERNEL_VXR_LOW)
+ save_fp_regs_vx(vxrs);
return;
}
-
- /* Test and save vector registers */
- asm volatile (
- /*
- * Test if any vector register must be saved and, if so,
- * test if all register can be saved.
- */
- " la 1,%[vxrs]\n" /* load save area */
- " tmll %[m],30\n" /* KERNEL_VXR */
- " jz 7f\n" /* no work -> done */
- " jo 5f\n" /* -> save V0..V31 */
- /*
- * Test for special case KERNEL_FPU_MID only. In this
- * case a vstm V8..V23 is the best instruction
- */
- " chi %[m],12\n" /* KERNEL_VXR_MID */
- " jne 0f\n" /* -> save V8..V23 */
- " VSTM 8,23,128,1\n" /* vstm %v8,%v23,128(%r1) */
- " j 7f\n"
- /* Test and save the first half of 16 vector registers */
- "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
- " jz 3f\n" /* -> KERNEL_VXR_HIGH */
- " jo 2f\n" /* 11 -> save V0..V15 */
- " brc 2,1f\n" /* 10 -> save V8..V15 */
- " VSTM 0,7,0,1\n" /* vstm %v0,%v7,0(%r1) */
- " j 3f\n"
- "1: VSTM 8,15,128,1\n" /* vstm %v8,%v15,128(%r1) */
- " j 3f\n"
- "2: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
- /* Test and save the second half of 16 vector registers */
- "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
- " jz 7f\n"
- " jo 6f\n" /* 11 -> save V16..V31 */
- " brc 2,4f\n" /* 10 -> save V24..V31 */
- " VSTM 16,23,256,1\n" /* vstm %v16,%v23,256(%r1) */
- " j 7f\n"
- "4: VSTM 24,31,384,1\n" /* vstm %v24,%v31,384(%r1) */
- " j 7f\n"
- "5: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
- "6: VSTM 16,31,256,1\n" /* vstm %v16,%v31,256(%r1) */
- "7:"
- : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
- : [m] "d" (flags)
- : "1", "cc");
+ mask = flags & KERNEL_VXR;
+ if (mask == KERNEL_VXR) {
+ vxrs += fpu_vstm(0, 15, vxrs);
+ vxrs += fpu_vstm(16, 31, vxrs);
+ return;
+ }
+ if (mask == KERNEL_VXR_MID) {
+ vxrs += fpu_vstm(8, 23, vxrs);
+ return;
+ }
+ mask = flags & KERNEL_VXR_LOW;
+ if (mask) {
+ if (mask == KERNEL_VXR_LOW)
+ vxrs += fpu_vstm(0, 15, vxrs);
+ else if (mask == KERNEL_VXR_V0V7)
+ vxrs += fpu_vstm(0, 7, vxrs);
+ else
+ vxrs += fpu_vstm(8, 15, vxrs);
+ }
+ mask = flags & KERNEL_VXR_HIGH;
+ if (mask) {
+ if (mask == KERNEL_VXR_HIGH)
+ vxrs += fpu_vstm(16, 31, vxrs);
+ else if (mask == KERNEL_VXR_V16V23)
+ vxrs += fpu_vstm(16, 23, vxrs);
+ else
+ vxrs += fpu_vstm(24, 31, vxrs);
+ }
}
EXPORT_SYMBOL(__kernel_fpu_begin);
-void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
+void __kernel_fpu_end(struct kernel_fpu *state, int flags)
{
+ __vector128 *vxrs = state->vxrs;
+ int mask;
+
/*
* Limit the restore to the FPU/vector registers of the
- * previous context that have been overwritte by the
- * current context
+ * previous context that have been overwritten by the
+ * current context.
*/
- flags &= state->mask;
-
+ flags &= state->hdr.mask;
if (flags & KERNEL_FPC)
- /* Restore floating-point controls */
- asm volatile("lfpc %0" : : "Q" (state->fpc));
-
+ fpu_lfpc(&state->hdr.fpc);
if (!cpu_has_vx()) {
- if (flags & KERNEL_VXR_V0V7) {
- /* Restore floating-point registers */
- asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
- asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
- asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
- asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
- asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
- asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
- asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
- asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
- asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
- asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
- asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
- asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
- asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
- asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
- asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
- asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
- }
+ if (flags & KERNEL_VXR_LOW)
+ load_fp_regs_vx(vxrs);
return;
}
-
- /* Test and restore (load) vector registers */
- asm volatile (
- /*
- * Test if any vector register must be loaded and, if so,
- * test if all registers can be loaded at once.
- */
- " la 1,%[vxrs]\n" /* load restore area */
- " tmll %[m],30\n" /* KERNEL_VXR */
- " jz 7f\n" /* no work -> done */
- " jo 5f\n" /* -> restore V0..V31 */
- /*
- * Test for special case KERNEL_FPU_MID only. In this
- * case a vlm V8..V23 is the best instruction
- */
- " chi %[m],12\n" /* KERNEL_VXR_MID */
- " jne 0f\n" /* -> restore V8..V23 */
- " VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */
- " j 7f\n"
- /* Test and restore the first half of 16 vector registers */
- "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
- " jz 3f\n" /* -> KERNEL_VXR_HIGH */
- " jo 2f\n" /* 11 -> restore V0..V15 */
- " brc 2,1f\n" /* 10 -> restore V8..V15 */
- " VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */
- " j 3f\n"
- "1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */
- " j 3f\n"
- "2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
- /* Test and restore the second half of 16 vector registers */
- "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
- " jz 7f\n"
- " jo 6f\n" /* 11 -> restore V16..V31 */
- " brc 2,4f\n" /* 10 -> restore V24..V31 */
- " VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */
- " j 7f\n"
- "4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */
- " j 7f\n"
- "5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
- "6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */
- "7:"
- : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
- : [m] "d" (flags)
- : "1", "cc");
+ mask = flags & KERNEL_VXR;
+ if (mask == KERNEL_VXR) {
+ vxrs += fpu_vlm(0, 15, vxrs);
+ vxrs += fpu_vlm(16, 31, vxrs);
+ return;
+ }
+ if (mask == KERNEL_VXR_MID) {
+ vxrs += fpu_vlm(8, 23, vxrs);
+ return;
+ }
+ mask = flags & KERNEL_VXR_LOW;
+ if (mask) {
+ if (mask == KERNEL_VXR_LOW)
+ vxrs += fpu_vlm(0, 15, vxrs);
+ else if (mask == KERNEL_VXR_V0V7)
+ vxrs += fpu_vlm(0, 7, vxrs);
+ else
+ vxrs += fpu_vlm(8, 15, vxrs);
+ }
+ mask = flags & KERNEL_VXR_HIGH;
+ if (mask) {
+ if (mask == KERNEL_VXR_HIGH)
+ vxrs += fpu_vlm(16, 31, vxrs);
+ else if (mask == KERNEL_VXR_V16V23)
+ vxrs += fpu_vlm(16, 23, vxrs);
+ else
+ vxrs += fpu_vlm(24, 31, vxrs);
+ }
}
EXPORT_SYMBOL(__kernel_fpu_end);
-void __load_fpu_regs(void)
+void load_fpu_state(struct fpu *state, int flags)
{
- unsigned long *regs = current->thread.fpu.regs;
- struct fpu *state = &current->thread.fpu;
+ __vector128 *vxrs = &state->vxrs[0];
+ int mask;
- sfpc_safe(state->fpc);
- if (likely(cpu_has_vx())) {
- asm volatile("lgr 1,%0\n"
- "VLM 0,15,0,1\n"
- "VLM 16,31,256,1\n"
- :
- : "d" (regs)
- : "1", "cc", "memory");
- } else {
- asm volatile("ld 0,%0" : : "Q" (regs[0]));
- asm volatile("ld 1,%0" : : "Q" (regs[1]));
- asm volatile("ld 2,%0" : : "Q" (regs[2]));
- asm volatile("ld 3,%0" : : "Q" (regs[3]));
- asm volatile("ld 4,%0" : : "Q" (regs[4]));
- asm volatile("ld 5,%0" : : "Q" (regs[5]));
- asm volatile("ld 6,%0" : : "Q" (regs[6]));
- asm volatile("ld 7,%0" : : "Q" (regs[7]));
- asm volatile("ld 8,%0" : : "Q" (regs[8]));
- asm volatile("ld 9,%0" : : "Q" (regs[9]));
- asm volatile("ld 10,%0" : : "Q" (regs[10]));
- asm volatile("ld 11,%0" : : "Q" (regs[11]));
- asm volatile("ld 12,%0" : : "Q" (regs[12]));
- asm volatile("ld 13,%0" : : "Q" (regs[13]));
- asm volatile("ld 14,%0" : : "Q" (regs[14]));
- asm volatile("ld 15,%0" : : "Q" (regs[15]));
+ if (flags & KERNEL_FPC)
+ fpu_lfpc(&state->fpc);
+ if (!cpu_has_vx()) {
+ if (flags & KERNEL_VXR_V0V7)
+ load_fp_regs_vx(state->vxrs);
+ return;
+ }
+ mask = flags & KERNEL_VXR;
+ if (mask == KERNEL_VXR) {
+ fpu_vlm(0, 15, &vxrs[0]);
+ fpu_vlm(16, 31, &vxrs[16]);
+ return;
+ }
+ if (mask == KERNEL_VXR_MID) {
+ fpu_vlm(8, 23, &vxrs[8]);
+ return;
+ }
+ mask = flags & KERNEL_VXR_LOW;
+ if (mask) {
+ if (mask == KERNEL_VXR_LOW)
+ fpu_vlm(0, 15, &vxrs[0]);
+ else if (mask == KERNEL_VXR_V0V7)
+ fpu_vlm(0, 7, &vxrs[0]);
+ else
+ fpu_vlm(8, 15, &vxrs[8]);
+ }
+ mask = flags & KERNEL_VXR_HIGH;
+ if (mask) {
+ if (mask == KERNEL_VXR_HIGH)
+ fpu_vlm(16, 31, &vxrs[16]);
+ else if (mask == KERNEL_VXR_V16V23)
+ fpu_vlm(16, 23, &vxrs[16]);
+ else
+ fpu_vlm(24, 31, &vxrs[24]);
}
- clear_cpu_flag(CIF_FPU);
-}
-
-void load_fpu_regs(void)
-{
- raw_local_irq_disable();
- __load_fpu_regs();
- raw_local_irq_enable();
}
-EXPORT_SYMBOL(load_fpu_regs);
-void save_fpu_regs(void)
+void save_fpu_state(struct fpu *state, int flags)
{
- unsigned long flags, *regs;
- struct fpu *state;
-
- local_irq_save(flags);
+ __vector128 *vxrs = &state->vxrs[0];
+ int mask;
- if (test_cpu_flag(CIF_FPU))
- goto out;
-
- state = &current->thread.fpu;
- regs = current->thread.fpu.regs;
-
- asm volatile("stfpc %0" : "=Q" (state->fpc));
- if (likely(cpu_has_vx())) {
- asm volatile("lgr 1,%0\n"
- "VSTM 0,15,0,1\n"
- "VSTM 16,31,256,1\n"
- :
- : "d" (regs)
- : "1", "cc", "memory");
- } else {
- asm volatile("std 0,%0" : "=Q" (regs[0]));
- asm volatile("std 1,%0" : "=Q" (regs[1]));
- asm volatile("std 2,%0" : "=Q" (regs[2]));
- asm volatile("std 3,%0" : "=Q" (regs[3]));
- asm volatile("std 4,%0" : "=Q" (regs[4]));
- asm volatile("std 5,%0" : "=Q" (regs[5]));
- asm volatile("std 6,%0" : "=Q" (regs[6]));
- asm volatile("std 7,%0" : "=Q" (regs[7]));
- asm volatile("std 8,%0" : "=Q" (regs[8]));
- asm volatile("std 9,%0" : "=Q" (regs[9]));
- asm volatile("std 10,%0" : "=Q" (regs[10]));
- asm volatile("std 11,%0" : "=Q" (regs[11]));
- asm volatile("std 12,%0" : "=Q" (regs[12]));
- asm volatile("std 13,%0" : "=Q" (regs[13]));
- asm volatile("std 14,%0" : "=Q" (regs[14]));
- asm volatile("std 15,%0" : "=Q" (regs[15]));
+ if (flags & KERNEL_FPC)
+ fpu_stfpc(&state->fpc);
+ if (!cpu_has_vx()) {
+ if (flags & KERNEL_VXR_LOW)
+ save_fp_regs_vx(state->vxrs);
+ return;
+ }
+ mask = flags & KERNEL_VXR;
+ if (mask == KERNEL_VXR) {
+ fpu_vstm(0, 15, &vxrs[0]);
+ fpu_vstm(16, 31, &vxrs[16]);
+ return;
+ }
+ if (mask == KERNEL_VXR_MID) {
+ fpu_vstm(8, 23, &vxrs[8]);
+ return;
+ }
+ mask = flags & KERNEL_VXR_LOW;
+ if (mask) {
+ if (mask == KERNEL_VXR_LOW)
+ fpu_vstm(0, 15, &vxrs[0]);
+ else if (mask == KERNEL_VXR_V0V7)
+ fpu_vstm(0, 7, &vxrs[0]);
+ else
+ fpu_vstm(8, 15, &vxrs[8]);
+ }
+ mask = flags & KERNEL_VXR_HIGH;
+ if (mask) {
+ if (mask == KERNEL_VXR_HIGH)
+ fpu_vstm(16, 31, &vxrs[16]);
+ else if (mask == KERNEL_VXR_V16V23)
+ fpu_vstm(16, 23, &vxrs[16]);
+ else
+ fpu_vstm(24, 31, &vxrs[24]);
}
- set_cpu_flag(CIF_FPU);
-out:
- local_irq_restore(flags);
}
-EXPORT_SYMBOL(save_fpu_regs);
+EXPORT_SYMBOL(save_fpu_state);
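
The rewritten fpu.c above only implements the save/restore backends; callers reach them through the kernel_fpu_begin()/kernel_fpu_end() wrappers and the KERNEL_FPC/KERNEL_VXR* flag masks seen in the diff. The sketch below is illustrative only and is not part of this commit: it shows how an in-tree user (modeled on the s390 crypto code) is expected to drive the API after the 6.9 rework. The DECLARE_KERNEL_FPU_ONSTACK32() macro and the wrapper signatures are assumptions taken from the 6.9 asm/fpu.h and should be checked against that header.

/*
 * Illustrative sketch, not part of the diff above.
 * Assumes the 6.9 <asm/fpu.h> API: DECLARE_KERNEL_FPU_ONSTACK32(),
 * kernel_fpu_begin()/kernel_fpu_end(), KERNEL_FPC, KERNEL_VXR.
 */
#include <asm/fpu.h>

static void do_vector_work(void)
{
	/* On-stack save area large enough for V0..V31 plus the FPC header. */
	DECLARE_KERNEL_FPU_ONSTACK32(vxstate);

	/*
	 * Save only the registers this section will clobber; the flags are
	 * intersected with state->hdr.mask in __kernel_fpu_begin() above.
	 */
	kernel_fpu_begin(&vxstate, KERNEL_FPC | KERNEL_VXR);

	/* ... vector instructions may be used here ... */

	/* Restore the previous context's registers that were overwritten. */
	kernel_fpu_end(&vxstate, KERNEL_FPC | KERNEL_VXR);
}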