提交 b3ecd4aa 编写于 作者: D David Hildenbrand 提交者: Christian Borntraeger

KVM: s390: vsie: use READ_ONCE to access some SCB fields

Another VCPU might try to modify the SCB while we are creating the
shadow SCB. In general this is no problem - unless the compiler decides
to not load values once, but e.g. twice.

For us, this is only relevant when checking/working with such values.
E.g. the prefix value, the mso, state of transactional execution and
addresses of satellite blocks.

E.g. if we blindly forward values (e.g. general purpose registers or
execution controls after masking), we don't care.

Leaving unpin_blocks() untouched for now, will handle it separately.

The worst thing right now that I can see would be a missed prefix
un/remap (mso, prefix, tx) or using wrong guest addresses. Nothing
critical, but let's try to avoid unpredictable behavior.
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20180116171526.12343-2-david@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
上级 c0b4bd21
...@@ -28,7 +28,11 @@ struct vsie_page { ...@@ -28,7 +28,11 @@ struct vsie_page {
* the same offset as that in struct sie_page! * the same offset as that in struct sie_page!
*/ */
struct mcck_volatile_info mcck_info; /* 0x0200 */ struct mcck_volatile_info mcck_info; /* 0x0200 */
/* the pinned originial scb */ /*
* The pinned original scb. Be aware that other VCPUs can modify
* it while we read from it. Values that are used for conditions or
* are reused conditionally, should be accessed via READ_ONCE.
*/
struct kvm_s390_sie_block *scb_o; /* 0x0218 */ struct kvm_s390_sie_block *scb_o; /* 0x0218 */
/* the shadow gmap in use by the vsie_page */ /* the shadow gmap in use by the vsie_page */
struct gmap *gmap; /* 0x0220 */ struct gmap *gmap; /* 0x0220 */
...@@ -140,12 +144,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -140,12 +144,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{ {
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U; const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2; unsigned long *b1, *b2;
u8 ecb3_flags; u8 ecb3_flags;
scb_s->crycbd = 0; scb_s->crycbd = 0;
if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
return 0; return 0;
/* format-1 is supported with message-security-assist extension 3 */ /* format-1 is supported with message-security-assist extension 3 */
if (!test_kvm_facility(vcpu->kvm, 76)) if (!test_kvm_facility(vcpu->kvm, 76))
...@@ -183,12 +188,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -183,12 +188,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{ {
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
/* READ_ONCE does not work on bitfields - use a temporary variable */
const uint32_t __new_ibc = scb_o->ibc;
const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU; __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
scb_s->ibc = 0; scb_s->ibc = 0;
/* ibc installed in g2 and requested for g3 */ /* ibc installed in g2 and requested for g3 */
if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) { if (vcpu->kvm->arch.model.ibc && new_ibc) {
scb_s->ibc = scb_o->ibc & 0x0fffU; scb_s->ibc = new_ibc;
/* takte care of the minimum ibc level of the machine */ /* takte care of the minimum ibc level of the machine */
if (scb_s->ibc < min_ibc) if (scb_s->ibc < min_ibc)
scb_s->ibc = min_ibc; scb_s->ibc = min_ibc;
...@@ -253,6 +261,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -253,6 +261,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{ {
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
/* READ_ONCE does not work on bitfields - use a temporary variable */
const uint32_t __new_prefix = scb_o->prefix;
const uint32_t new_prefix = READ_ONCE(__new_prefix);
const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
bool had_tx = scb_s->ecb & ECB_TE; bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0; unsigned long new_mso = 0;
int rc; int rc;
...@@ -299,14 +311,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -299,14 +311,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->icpua = scb_o->icpua; scb_s->icpua = scb_o->icpua;
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM)) if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
new_mso = scb_o->mso & 0xfffffffffff00000UL; new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
/* if the hva of the prefix changes, we have to remap the prefix */ /* if the hva of the prefix changes, we have to remap the prefix */
if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix) if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
prefix_unmapped(vsie_page); prefix_unmapped(vsie_page);
/* SIE will do mso/msl validity and exception checks for us */ /* SIE will do mso/msl validity and exception checks for us */
scb_s->msl = scb_o->msl & 0xfffffffffff00000UL; scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
scb_s->mso = new_mso; scb_s->mso = new_mso;
scb_s->prefix = scb_o->prefix; scb_s->prefix = new_prefix;
/* We have to definetly flush the tlb if this scb never ran */ /* We have to definetly flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU) if (scb_s->ihcpu != 0xffffU)
...@@ -318,11 +330,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -318,11 +330,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP)) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT; scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
/* transactional execution */ /* transactional execution */
if (test_kvm_facility(vcpu->kvm, 73)) { if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
/* remap the prefix is tx is toggled on */ /* remap the prefix is tx is toggled on */
if ((scb_o->ecb & ECB_TE) && !had_tx) if (!had_tx)
prefix_unmapped(vsie_page); prefix_unmapped(vsie_page);
scb_s->ecb |= scb_o->ecb & ECB_TE; scb_s->ecb |= ECB_TE;
} }
/* SIMD */ /* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) { if (test_kvm_facility(vcpu->kvm, 129)) {
...@@ -529,9 +541,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -529,9 +541,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
gpa_t gpa; gpa_t gpa;
int rc = 0; int rc = 0;
gpa = scb_o->scaol & ~0xfUL; gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
gpa |= (u64) scb_o->scaoh << 32; gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
if (gpa) { if (gpa) {
if (!(gpa & ~0x1fffUL)) if (!(gpa & ~0x1fffUL))
rc = set_validity_icpt(scb_s, 0x0038U); rc = set_validity_icpt(scb_s, 0x0038U);
...@@ -551,7 +563,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -551,7 +563,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->scaol = (u32)(u64)hpa; scb_s->scaol = (u32)(u64)hpa;
} }
gpa = scb_o->itdba & ~0xffUL; gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) { if (gpa && (scb_s->ecb & ECB_TE)) {
if (!(gpa & ~0x1fffU)) { if (!(gpa & ~0x1fffU)) {
rc = set_validity_icpt(scb_s, 0x0080U); rc = set_validity_icpt(scb_s, 0x0080U);
...@@ -566,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -566,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->itdba = hpa; scb_s->itdba = hpa;
} }
gpa = scb_o->gvrd & ~0x1ffUL; gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
if (!(gpa & ~0x1fffUL)) { if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x1310U); rc = set_validity_icpt(scb_s, 0x1310U);
...@@ -584,7 +596,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -584,7 +596,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->gvrd = hpa; scb_s->gvrd = hpa;
} }
gpa = scb_o->riccbd & ~0x3fUL; gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
if (gpa && (scb_s->ecb3 & ECB3_RI)) { if (gpa && (scb_s->ecb3 & ECB3_RI)) {
if (!(gpa & ~0x1fffUL)) { if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0043U); rc = set_validity_icpt(scb_s, 0x0043U);
...@@ -602,8 +614,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -602,8 +614,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
unsigned long sdnxc; unsigned long sdnxc;
gpa = scb_o->sdnxo & ~0xfUL; gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
sdnxc = scb_o->sdnxo & 0xfUL; sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
if (!gpa || !(gpa & ~0x1fffUL)) { if (!gpa || !(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x10b0U); rc = set_validity_icpt(scb_s, 0x10b0U);
goto unpin; goto unpin;
...@@ -768,7 +780,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page) ...@@ -768,7 +780,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page)
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{ {
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
__u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U; __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
if (fac && test_kvm_facility(vcpu->kvm, 7)) { if (fac && test_kvm_facility(vcpu->kvm, 7)) {
retry_vsie_icpt(vsie_page); retry_vsie_icpt(vsie_page);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册