diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 2e4c3b222a96253c1ff8a8b5d6dd529ad5daf82c..752cf47a81abf3a68f871a604fbb5e17b5556570 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -109,8 +109,10 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
 
 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                          int edat_level);
-int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t);
-int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t);
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+                    int fake);
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+                    int fake);
 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
                     int fake);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index f6d556dfafcddba89e1b89a27f013ddea1c0ad02..54200208bf24fe73f0fdb8c7b1aac639e6160e86 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -971,9 +971,13 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         vaddr.addr = saddr;
         asce.val = sg->orig_asce;
         ptr = asce.origin * 4096;
+        if (asce.r) {
+                *fake = 1;
+                asce.dt = ASCE_TYPE_REGION1;
+        }
         switch (asce.dt) {
         case ASCE_TYPE_REGION1:
-                if (vaddr.rfx01 > asce.tl)
+                if (vaddr.rfx01 > asce.tl && !asce.r)
                         return PGM_REGION_FIRST_TRANS;
                 break;
         case ASCE_TYPE_REGION2:
@@ -1000,6 +1004,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         case ASCE_TYPE_REGION1: {
                 union region1_table_entry rfte;
 
+                if (*fake) {
+                        /* offset in 16EB guest memory block */
+                        ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+                        rfte.val = ptr;
+                        goto shadow_r2t;
+                }
                 rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
                 if (rc)
                         return rc;
@@ -1011,15 +1021,22 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                         return PGM_REGION_SECOND_TRANS;
                 if (sg->edat_level >= 1)
                         *dat_protection |= rfte.p;
-                rc = gmap_shadow_r2t(sg, saddr, rfte.val);
+                ptr = rfte.rto << 12UL;
+shadow_r2t:
+                rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
                 if (rc)
                         return rc;
-                ptr = rfte.rto * 4096;
                 /* fallthrough */
         }
         case ASCE_TYPE_REGION2: {
                 union region2_table_entry rste;
 
+                if (*fake) {
+                        /* offset in 8PB guest memory block */
+                        ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+                        rste.val = ptr;
+                        goto shadow_r3t;
+                }
                 rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
                 if (rc)
                         return rc;
@@ -1031,16 +1048,23 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                         return PGM_REGION_THIRD_TRANS;
                 if (sg->edat_level >= 1)
                         *dat_protection |= rste.p;
+                ptr = rste.rto << 12UL;
+shadow_r3t:
                 rste.p |= *dat_protection;
-                rc = gmap_shadow_r3t(sg, saddr, rste.val);
+                rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
                 if (rc)
                         return rc;
-                ptr = rste.rto * 4096;
                 /* fallthrough */
         }
         case ASCE_TYPE_REGION3: {
                 union region3_table_entry rtte;
 
+                if (*fake) {
+                        /* offset in 4TB guest memory block */
+                        ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+                        rtte.val = ptr;
+                        goto shadow_sgt;
+                }
                 rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
                 if (rc)
                         return rc;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index c96bf30245c0b3488f9c2c6538f64c1ce563ea36..c07d64f5cdb5a50bfdc13bcaa748e1b77e3e2100 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1437,6 +1437,8 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                 return sg;
         /* Create a new shadow gmap */
         limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
+        if (asce & _ASCE_REAL_SPACE)
+                limit = -1UL;
         new = gmap_alloc(limit);
         if (!new)
                 return ERR_PTR(-ENOMEM);
@@ -1455,6 +1457,12 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
         }
         atomic_set(&new->ref_count, 2);
         list_add(&new->list, &parent->children);
+        if (asce & _ASCE_REAL_SPACE) {
+                /* nothing to protect, return right away */
+                new->initialized = true;
+                spin_unlock(&parent->shadow_lock);
+                return new;
+        }
         spin_unlock(&parent->shadow_lock);
         /* protect after insertion, so it will get properly invalidated */
         down_read(&parent->mm->mmap_sem);
@@ -1479,6 +1487,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
  * @r2t: parent gmap address of the region 2 table to get shadowed
+ * @fake: r2t references contiguous guest memory block, not a r2t
  *
  * The r2t parameter specifies the address of the source table. The
  * four pages of the source table are made read-only in the parent gmap
@@ -1491,7 +1500,8 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+                    int fake)
 {
         unsigned long raddr, origin, offset, len;
         unsigned long *s_r2t, *table;
@@ -1504,6 +1514,8 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
         if (!page)
                 return -ENOMEM;
         page->index = r2t & _REGION_ENTRY_ORIGIN;
+        if (fake)
+                page->index |= GMAP_SHADOW_FAKE_TABLE;
         s_r2t = (unsigned long *) page_to_phys(page);
         /* Install shadow region second table */
         spin_lock(&sg->guest_table_lock);
@@ -1526,6 +1538,12 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
         if (sg->edat_level >= 1)
                 *table |= (r2t & _REGION_ENTRY_PROTECT);
         list_add(&page->lru, &sg->crst_list);
+        if (fake) {
+                /* nothing to protect for fake tables */
+                *table &= ~_REGION_ENTRY_INVALID;
+                spin_unlock(&sg->guest_table_lock);
+                return 0;
+        }
         spin_unlock(&sg->guest_table_lock);
         /* Make r2t read-only in parent gmap page table */
         raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
@@ -1558,6 +1576,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
  * @r3t: parent gmap address of the region 3 table to get shadowed
+ * @fake: r3t references contiguous guest memory block, not a r3t
  *
  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -1565,7 +1584,8 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+                    int fake)
 {
         unsigned long raddr, origin, offset, len;
         unsigned long *s_r3t, *table;
@@ -1578,6 +1598,8 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
         if (!page)
                 return -ENOMEM;
         page->index = r3t & _REGION_ENTRY_ORIGIN;
+        if (fake)
+                page->index |= GMAP_SHADOW_FAKE_TABLE;
         s_r3t = (unsigned long *) page_to_phys(page);
         /* Install shadow region second table */
         spin_lock(&sg->guest_table_lock);
@@ -1599,6 +1621,12 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
         if (sg->edat_level >= 1)
                 *table |= (r3t & _REGION_ENTRY_PROTECT);
         list_add(&page->lru, &sg->crst_list);
+        if (fake) {
+                /* nothing to protect for fake tables */
+                *table &= ~_REGION_ENTRY_INVALID;
+                spin_unlock(&sg->guest_table_lock);
+                return 0;
+        }
         spin_unlock(&sg->guest_table_lock);
         /* Make r3t read-only in parent gmap page table */
         raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
@@ -1932,7 +1960,8 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
         /* Check for top level table */
         start = sg->orig_asce & _ASCE_ORIGIN;
         end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
-        if (gaddr >= start && gaddr < end) {
+        if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
+            gaddr < end) {
                 /* The complete shadow table has to go */
                 gmap_unshadow(sg);
                 spin_unlock(&sg->guest_table_lock);
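
The gaccess.c hunks above extend kvm_s390_shadow_tables() so that a real-space ASCE (asce.r set) can be shadowed without reading any guest DAT tables: the guest address space is a 1:1 mapping of guest real storage, so each level fabricates the origin of the next-level table as the guest-real start of the block covered by the entry at the current level (a region-first entry covers 2^53 bytes, a region-second entry 2^42, a region-third entry 2^31 and a segment entry 2^20, so the index has to be paired with the entry size of its own level). The standalone sketch below models only that arithmetic, for an identity mapping rooted at guest real address 0; the index names (rfx, rsx, rtx, sx, px) mirror the union vaddress bit fields in arch/s390/kvm/gaccess.c, while the helpers and the main() driver are purely illustrative and not part of the patch.

/*
 * Model of the "fake table" origins used when shadowing a real-space ASCE.
 * Illustrative only: the index extraction mirrors union vaddress in
 * arch/s390/kvm/gaccess.c, everything else is made up for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int rfx(uint64_t a) { return (a >> 53) & 0x7ff; } /* region first index  */
static unsigned int rsx(uint64_t a) { return (a >> 42) & 0x7ff; } /* region second index */
static unsigned int rtx(uint64_t a) { return (a >> 31) & 0x7ff; } /* region third index  */
static unsigned int sx(uint64_t a)  { return (a >> 20) & 0x7ff; } /* segment index       */
static unsigned int px(uint64_t a)  { return (a >> 12) & 0xff;  } /* page index          */

int main(void)
{
        uint64_t saddr = 0x0123456789abc000ULL; /* faulting address in the shadow gmap */
        uint64_t ptr = 0;                       /* real space: identity map starting at 0 */

        /* each step yields the origin of the faked next-level table, i.e. the
         * guest-real start of the block covered by the current-level entry */
        ptr += (uint64_t)rfx(saddr) << 53;      /* fake region second table (8PB block) */
        printf("fake r2t origin: %#018llx\n", (unsigned long long)ptr);
        ptr += (uint64_t)rsx(saddr) << 42;      /* fake region third table (4TB block)  */
        printf("fake r3t origin: %#018llx\n", (unsigned long long)ptr);
        ptr += (uint64_t)rtx(saddr) << 31;      /* fake segment table (2GB block)       */
        printf("fake sgt origin: %#018llx\n", (unsigned long long)ptr);
        ptr += (uint64_t)sx(saddr) << 20;       /* fake page table (1MB block)          */
        printf("fake pgt origin: %#018llx\n", (unsigned long long)ptr);
        ptr += (uint64_t)px(saddr) << 12;       /* resolved page, equals saddr again    */
        printf("resolved page:   %#018llx\n", (unsigned long long)ptr);
        return 0;
}

Because the mapping is 1:1, the final value must equal the page-aligned saddr; that property is a handy self-check when reading the REGION1/REGION2/REGION3 fake branches above.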
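
The first gmap.c hunk sizes the new shadow address space in gmap_shadow(). The limit is normally derived from the designation-type (DT) bits of the ASCE; a real-space ASCE has no meaningful DT and covers the whole 64-bit address space, hence the added override to -1UL before gmap_alloc(). Below is a quick model of that expression, assuming the usual s390 ASCE layout (DT in bits 60-61, real-space control in bit 58); the two macro values follow my reading of arch/s390/include/asm/pgtable.h and should be double-checked against your tree, the rest is illustrative:

/* Model of the shadow gmap limit chosen in gmap_shadow(). */
#include <stdio.h>
#include <stdint.h>

#define ASCE_TYPE_MASK  0x0cULL /* designation-type bits (DT) */
#define ASCE_REAL_SPACE 0x20ULL /* real-space control bit (R) */

static uint64_t shadow_limit(uint64_t asce)
{
        /* DT selects the top-level table: 0=segment, 1=region3, 2=region2, 3=region1 */
        uint64_t limit = UINT64_MAX >> (33 - ((asce & ASCE_TYPE_MASK) >> 2) * 11);

        /* real-space designation: DT is not meaningful, cover all 64 bits */
        if (asce & ASCE_REAL_SPACE)
                limit = UINT64_MAX;
        return limit;
}

int main(void)
{
        static const char *name[] = { "segment", "region3", "region2", "region1" };
        unsigned int dt;

        for (dt = 0; dt < 4; dt++)
                printf("%-10s limit = %#018llx\n", name[dt],
                       (unsigned long long)shadow_limit((uint64_t)dt << 2));
        printf("%-10s limit = %#018llx\n", "real-space",
               (unsigned long long)shadow_limit(ASCE_REAL_SPACE));
        return 0;
}

The printed limits are the highest mappable address for each top-level table type (2GB-1, 4TB-1, 8PB-1 and 16EB-1), which is what gmap_alloc() expects.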
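
Finally, the gmap_shadow_r2t()/gmap_shadow_r3t() hunks record in page->index of the backing page whether the new shadow table is fake, i.e. whether it shadows a contiguous guest memory block instead of a real guest table. The origin stored there is at least 4K aligned, so a low bit is free for the GMAP_SHADOW_FAKE_TABLE marker; and because no guest table exists that could be modified behind our back, the fake entry is made valid immediately and the usual rmap/write-protection step on the parent gmap is skipped. A minimal model of that tagging scheme follows; using bit 0 as the marker is an assumption made for this sketch, not the kernel's definition:

/* Model of tagging a 4K-aligned table origin with a "fake" marker bit. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ORIGIN_MASK (~0xfffULL) /* shadow table origins are at least 4K aligned  */
#define FAKE_TABLE  0x1ULL      /* illustrative marker bit, not the kernel value */

static uint64_t make_index(uint64_t origin, bool fake)
{
        return (origin & ORIGIN_MASK) | (fake ? FAKE_TABLE : 0);
}

int main(void)
{
        uint64_t idx = make_index(0x0000004000000000ULL, true);

        printf("stored index: %#llx\n", (unsigned long long)idx);
        printf("origin:       %#llx\n", (unsigned long long)(idx & ORIGIN_MASK));
        printf("fake:         %d\n", (int)!!(idx & FAKE_TABLE));
        return 0;
}

Later lookups that resolve a fault in the shadow gmap can then recover both the shadowed origin and the fake flag from the same field, without any extra bookkeeping structure.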