Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
 arch/powerpc/kvm/powerpc.c | 76 ++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7197c82566..f6af752698 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
}
}
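
This first hunk sets the pattern for the whole patch: the lvalue-style VCPU_VSX_VR()/VCPU_VSX_FPR() macros are replaced by getter/setter helpers, with the 128-bit vector passed by pointer. The helper definitions are not part of this diff; a minimal sketch of their likely shape, assuming they are thin wrappers over the same vcpu->arch.vr.vr[] field the old macro touched (the 1:1 mechanical conversion suggests as much):

/* Presumed accessor shape (defined outside this diff, e.g. in a
 * kvm_book3s.h-style header). vector128 is 128 bits wide, hence the
 * pass-by-pointer interface instead of a return value.
 */
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	*v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	vcpu->arch.vr.vr[i] = *v;
}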
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, 0) = gpr;
- VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
}
}
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
- VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
- VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
}
}
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
- val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
- VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
}
}
#endif /* CONFIG_VSX */
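
The scalar side of the same conversion: VCPU_VSX_FPR(), which the old code used as an lvalue for the two 64-bit halves of a VSX register, becomes a get/set pair. A sketch under the assumption that the macro expanded to vcpu->arch.fp.fpr[i][j]:

/* Presumed FPR-half accessors (hypothetical reconstruction, not part of
 * this diff): i selects the VSX register, j the 64-bit half.
 */
static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
	return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 val)
{
	vcpu->arch.fp.fpr[i][j] = val;
}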
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx16val[offset] = gpr16;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx8val[offset] = gpr8;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */
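
The four kvmppc_set_vmx_* hunks above all specialize one read-modify-write pattern, differing only in element width (vsxval/vsx32val/vsx16val/vsx8val) and the guest value spliced in. Because the new interface reads and writes the full register, each element store is now an explicit read-modify-write rather than an in-place lvalue update. A hypothetical helper (not in the kernel) showing the halfword case end to end:

static inline void vmx_set_hword_sketch(struct kvm_vcpu *vcpu, int index,
					int offset, u16 gpr16)
{
	union kvmppc_one_reg val;

	kvmppc_get_vsx_vr(vcpu, index, &val.vval);	/* read the whole 128-bit VR */
	val.vsx16val[offset] = gpr16;			/* patch one halfword element */
	kvmppc_set_vsx_vr(vcpu, index, &val.vval);	/* write the register back */
}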
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
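
MMIO load completions into KVM_MMIO_REG_FPR and KVM_MMIO_REG_FQPR now go through kvmppc_set_fpr(). Assuming VCPU_FPR() expanded to the TS_FPROFFSET half of the combined FP/VSX array, as the removed lines imply, the setter would look like:

/* Presumed scalar-FPR setter (hypothetical sketch): TS_FPROFFSET selects
 * the doubleword of fpr[i][] that holds the scalar FP value.
 */
static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
}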
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
}
if (rs < 32) {
- *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
- reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsx32val[vsx_offset];
}
break;
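
The unchanged offset arithmetic in this hunk deserves a worked example: the four 32-bit words of a VSX register live in two 64-bit FPR halves, so a word offset is split into a doubleword index and a word-within-doubleword index. An illustrative helper (not in the patch) making the split explicit:

/* Worked example: VSX word offset 3 selects doubleword 1 (3 / 2) and
 * word 1 within it (3 % 2).
 */
static inline void vsx_word_split(int vsx_offset, int *dword, int *word)
{
	*dword = vsx_offset / 2;
	*word  = vsx_offset % 2;
}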
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsxval[vmx_offset];
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx32val[vmx_offset];
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx16val[vmx_offset];
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx8val[vmx_offset];
return result;
@@ -1719,17 +1719,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
break;
#endif /* CONFIG_ALTIVEC */
default:
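
KVM_REG_PPC_VSCR and KVM_REG_PPC_VRSAVE reads get the same treatment in the one-reg ioctl path. Mirroring the removed right-hand sides (vcpu->arch.vr.vscr.u[3] and vcpu->arch.vrsave), the getters are presumably:

/* Presumed getters (sketch; definitions live outside this diff). VSCR is
 * architecturally 32 bits, kept in word 3 of the 128-bit vscr field.
 */
static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vr.vscr.u[3];
}

static inline u32 kvmppc_get_vrsave(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vrsave;
}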
@@ -1770,21 +1770,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
break;
#endif /* CONFIG_ALTIVEC */
default:
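
And the matching setters for the one-reg write path, again assumed to be thin wrappers over the fields the removed lines assigned directly. Funnelling every register access through one accessor layer like this gives later changes (dirty tracking, lazily synced guest state, and the like) a single hook point without touching each call site again.

/* Presumed setters (sketch, mirroring the removed assignments). */
static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vr.vscr.u[3] = val;
}

static inline void kvmppc_set_vrsave(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vrsave = val;
}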