Commit 92c96321 authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: gaccess: introduce access modes

We will need special handling when fetching instructions, so let's
introduce new guest access modes GACC_FETCH and GACC_STORE instead
of a write flag. An additional patch will then introduce GACC_IFETCH.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Parent 634790b8
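Before the diff, here is a minimal, self-contained sketch of the pattern this patch introduces: the old `int write` flag becomes an `enum gacc_mode`, so the store-only protection checks turn into explicit comparisons and a later patch can add GACC_IFETCH without overloading a boolean. The enum values below mirror the ones the patch adds to gaccess.h; the access_demo() helper, its parameters and its return value are purely illustrative stand-ins for access_guest()/write_guest()/read_guest(), not kernel code.

```c
#include <string.h>

/* Mirrors the enum this patch adds to gaccess.h. */
enum gacc_mode {
	GACC_FETCH,
	GACC_STORE,
	/* per the commit message, a follow-up patch adds GACC_IFETCH */
};

/*
 * Hypothetical stand-in for access_guest(): with a mode instead of a
 * write flag, the store-only protection check is an explicit comparison
 * and new access modes can be added without touching fetch-only callers.
 */
static int access_demo(unsigned char *guest, unsigned char *buf,
		       unsigned long len, int write_protected,
		       enum gacc_mode mode)
{
	if (mode == GACC_STORE && write_protected)
		return 4;			/* stands in for PGM_PROTECTION */
	if (mode == GACC_STORE)
		memcpy(guest, buf, len);	/* write_guest() direction */
	else
		memcpy(buf, guest, len);	/* read_guest() direction */
	return 0;
}
```

Callers then spell out the intent at each call site, which is exactly what the write_guest()/read_guest() wrappers in the gaccess.h hunk below do with GACC_STORE and GACC_FETCH.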
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 }
 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
-			  int write)
+			  enum gacc_mode mode)
 {
 	union alet alet;
 	struct ale ale;
@@ -454,7 +454,7 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
 		}
 	}
-	if (ale.fo == 1 && write)
+	if (ale.fo == 1 && mode == GACC_STORE)
 		return PGM_PROTECTION;
 	asce->val = aste.asce;
@@ -477,7 +477,7 @@ enum {
 };
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 ar_t ar, int write)
+			 ar_t ar, enum gacc_mode mode)
 {
 	int rc;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -486,7 +486,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
 	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+	tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
 	tec_bits->as = psw_bits(*psw).as;
 	if (!psw_bits(*psw).t) {
@@ -506,7 +506,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
 		asce->val = vcpu->arch.sie_block->gcr[13];
 		return 0;
 	case PSW_AS_ACCREG:
-		rc = ar_translation(vcpu, asce, ar, write);
+		rc = ar_translation(vcpu, asce, ar, mode);
 		switch (rc) {
 		case PGM_ALEN_TRANSLATION:
 		case PGM_ALE_SEQUENCE:
@@ -538,7 +538,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
  * @asce: effective asce
- * @write: indicates if access is a write access
+ * @mode: indicates the access mode to be used
  *
  * Translate a guest virtual address into a guest absolute address by means
  * of dynamic address translation as specified by the architecture.
@@ -554,7 +554,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 				     unsigned long *gpa, const union asce asce,
-				     int write)
+				     enum gacc_mode mode)
 {
 	union vaddress vaddr = {.addr = gva};
 	union raddress raddr = {.addr = gva};
@@ -699,7 +699,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 real_address:
 	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
 absolute_address:
-	if (write && dat_protection)
+	if (mode == GACC_STORE && dat_protection)
 		return PGM_PROTECTION;
 	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
 		return PGM_ADDRESSING;
@@ -728,7 +728,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 			    unsigned long *pages, unsigned long nr_pages,
-			    const union asce asce, int write)
+			    const union asce asce, enum gacc_mode mode)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -740,13 +740,13 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	while (nr_pages) {
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
 		tec_bits->addr = ga >> PAGE_SHIFT;
-		if (write && lap_enabled && is_low_address(ga)) {
+		if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
 			pgm->code = PGM_PROTECTION;
 			return pgm->code;
 		}
 		ga &= PAGE_MASK;
 		if (psw_bits(*psw).t) {
-			rc = guest_translate(vcpu, ga, pages, asce, write);
+			rc = guest_translate(vcpu, ga, pages, asce, mode);
 			if (rc < 0)
 				return rc;
 			if (rc == PGM_PROTECTION)
@@ -768,7 +768,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 }
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-		 unsigned long len, int write)
+		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	unsigned long _len, nr_pages, gpa, idx;
@@ -780,7 +780,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 	if (!len)
 		return 0;
-	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
 	if (rc)
 		return rc;
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
@@ -792,11 +792,11 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 	need_ipte_lock = psw_bits(*psw).t && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
-	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
+	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
 	for (idx = 0; idx < nr_pages && !rc; idx++) {
 		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-		if (write)
+		if (mode == GACC_STORE)
 			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
 		else
 			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
@@ -812,7 +812,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 }
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-		      void *data, unsigned long len, int write)
+		      void *data, unsigned long len, enum gacc_mode mode)
 {
 	unsigned long _len, gpa;
 	int rc = 0;
@@ -820,7 +820,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 	while (len && !rc) {
 		gpa = kvm_s390_real_to_abs(vcpu, gra);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-		if (write)
+		if (mode)
 			rc = write_guest_abs(vcpu, gpa, data, _len);
 		else
 			rc = read_guest_abs(vcpu, gpa, data, _len);
@@ -841,7 +841,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * has to take care of this.
  */
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-			    unsigned long *gpa, int write)
+			    unsigned long *gpa, enum gacc_mode mode)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -851,19 +851,19 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 	gva = kvm_s390_logical_to_effective(vcpu, gva);
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
 	tec->addr = gva >> PAGE_SHIFT;
 	if (rc)
 		return rc;
 	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
-		if (write) {
+		if (mode == GACC_STORE) {
 			rc = pgm->code = PGM_PROTECTION;
 			return rc;
 		}
 	}
 	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
-		rc = guest_translate(vcpu, gva, gpa, asce, write);
+		rc = guest_translate(vcpu, gva, gpa, asce, mode);
 		if (rc > 0) {
 			if (rc == PGM_PROTECTION)
 				tec->b61 = 1;
@@ -883,7 +883,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-		    unsigned long length, int is_write)
+		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
 	unsigned long currlen;
@@ -892,7 +892,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 	ipte_lock(vcpu);
 	while (length > 0 && !rc) {
 		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
-		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
 		gva += currlen;
 		length -= currlen;
 	}
...
@@ -155,16 +155,21 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
+enum gacc_mode {
+	GACC_FETCH,
+	GACC_STORE,
+};
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, int write);
+			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-		    unsigned long length, int is_write);
+		    unsigned long length, enum gacc_mode mode);
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-		 unsigned long len, int write);
+		 unsigned long len, enum gacc_mode mode);
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-		      void *data, unsigned long len, int write);
+		      void *data, unsigned long len, enum gacc_mode mode);
 /**
  * write_guest - copy data from kernel space to guest space
@@ -215,7 +220,7 @@ static inline __must_check
 int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		unsigned long len)
 {
-	return access_guest(vcpu, ga, ar, data, len, 1);
+	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
 }
 /**
@@ -235,7 +240,7 @@ static inline __must_check
 int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		unsigned long len)
 {
-	return access_guest(vcpu, ga, ar, data, len, 0);
+	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
 }
 /**
...
@@ -317,7 +317,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	/* Make sure that the source is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-				     reg2, &srcaddr, 0);
+				     reg2, &srcaddr, GACC_FETCH);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -326,7 +326,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	/* Make sure that the destination is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-				     reg1, &dstaddr, 1);
+				     reg1, &dstaddr, GACC_STORE);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
...
@@ -2610,7 +2610,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	switch (mop->op) {
 	case KVM_S390_MEMOP_LOGICAL_READ:
 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+					    mop->size, GACC_FETCH);
 			break;
 		}
 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
@@ -2621,7 +2622,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 		break;
 	case KVM_S390_MEMOP_LOGICAL_WRITE:
 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+					    mop->size, GACC_STORE);
 			break;
 		}
 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
...
@@ -981,11 +981,12 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
 		ipte_lock(vcpu);
-	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
+	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
 	if (ret == PGM_PROTECTION) {
 		/* Write protected? Try again with read-only... */
 		cc = 1;
-		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
+		ret = guest_translate_address(vcpu, address1, ar, &gpa,
+					      GACC_FETCH);
 	}
 	if (ret) {
 		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
...
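The kvm_s390_guest_mem_op() hunk above is the user-visible end of this change: the KVM_S390_MEMOP check-only path now reaches check_gva_range() with GACC_FETCH or GACC_STORE. Below is a hedged illustration of how userspace drives that path, assuming the struct kvm_s390_mem_op layout and the KVM_S390_MEM_OP vcpu ioctl from include/uapi/linux/kvm.h of this era; vcpu fd setup is omitted and the return-value convention is an assumption.

```c
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Ask KVM only to *check* whether a guest logical write of "size" bytes
 * at "gaddr" would be allowed, without transferring any data; in the
 * kernel this should end up in check_gva_range(..., GACC_STORE).
 * Return value (assumption): 0 if accessible, a positive program
 * interruption code if the guest access would fault, negative on error.
 */
static int memop_check_write(int vcpu_fd, __u64 gaddr, __u32 size, __u8 ar)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size  = size;
	op.ar    = ar;
	op.op    = KVM_S390_MEMOP_LOGICAL_WRITE;
	op.flags = KVM_S390_MEMOP_F_CHECK_ONLY;	/* no buffer needed */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
```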