diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e9bcdca32a323cd164437050780370270d44caff..8264b0b0f1cebe4d8c8fdfa0921bdbe8a4f9fb2d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -227,6 +227,7 @@ struct kvm_vcpu_arch {
 		struct cpuid	cpu_id;
 		u64		stidp_data;
 	};
+	struct gmap *gmap;
 };
 
 struct kvm_vm_stat {
@@ -237,6 +238,7 @@ struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
+	struct gmap *gmap;
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b5312050b224e248222e3eafa6926861463709f0..654fc1fa37e7d791ec92a37ff460bed309cd8b74 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -160,6 +160,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
+	unsigned long vmaddr;
 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
 	int rc;
 
@@ -170,12 +171,27 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 			vcpu->arch.sie_block->gmsor +
 			vcpu->arch.sie_block->prefix,
 			2*PAGE_SIZE);
-		if (rc)
+		if (rc) {
 			/* user will receive sigsegv, exit to user */
 			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
+				    vcpu->arch.gmap);
+		if (IS_ERR_VALUE(vmaddr)) {
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
+				    vcpu->arch.gmap);
+		if (IS_ERR_VALUE(vmaddr)) {
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
 	} else
 		rc = -EOPNOTSUPP;
 
+out:
 	if (rc)
 		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
 			   viwhy);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 123ebea72282cb985fec66009467e9ae7ff09069..3ebb4ba83d9dd523b06330ff4a38ccbf06d1c702 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -190,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
+	kvm->arch.gmap = gmap_alloc(current->mm);
+	if (!kvm->arch.gmap)
+		goto out_nogmap;
+
 	return 0;
+out_nogmap:
+	debug_unregister(kvm->arch.dbf);
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
 out_err:
@@ -235,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
+	gmap_free(kvm->arch.gmap);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 	return 0;
 }
 
@@ -285,7 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
 	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
 	vcpu->arch.sie_block->ecb   = 6;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
@@ -454,6 +462,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
+	gmap_enable(vcpu->arch.gmap);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -462,6 +471,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
+	gmap_disable(vcpu->arch.gmap);
 	local_irq_disable();
 	kvm_guest_exit();
 	local_irq_enable();
@@ -479,13 +489,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 		kvm_s390_vcpu_set_mem(vcpu);
 
-	/* verify, that memory has been registered */
-	if (!vcpu->arch.sie_block->gmslm) {
-		vcpu_put(vcpu);
-		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
-		return -EINVAL;
-	}
-
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -681,10 +684,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr)
 		return -EINVAL;
 
-	if (mem->userspace_addr & (PAGE_SIZE - 1))
+	if (mem->userspace_addr & 0xffffful)
 		return -EINVAL;
 
-	if (mem->memory_size & (PAGE_SIZE - 1))
+	if (mem->memory_size & 0xffffful)
 		return -EINVAL;
 
 	if (!user_alloc)
@@ -698,15 +701,22 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
-	int i;
+	int i, rc;
 	struct kvm_vcpu *vcpu;
 
+	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+		mem->guest_phys_addr, mem->memory_size);
+	if (rc)
+		return;
+
 	/* request update of sie control block for all available vcpus */
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 			continue;
 		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
 	}
+	return;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
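
For reference, a minimal sketch of the gmap lifecycle the hunks above rely on. This is illustrative only and not part of the patch: the function name, addresses and return codes are made up, while the gmap_* calls and their argument order follow the way they are used in the patch (gmap_alloc on VM creation, gmap_map_segment when a memory slot is committed, gmap_fault to resolve a guest address on demand, gmap_enable/gmap_disable around sie64a, gmap_free on VM destruction).

/* Illustrative sketch of the gmap usage pattern, not part of the patch. */
#include <linux/err.h>		/* IS_ERR_VALUE */
#include <linux/sched.h>	/* current */
#include <asm/pgtable.h>	/* struct gmap and the gmap_* primitives */

static int gmap_usage_sketch(unsigned long host_addr, unsigned long guest_addr,
			     unsigned long size)
{
	struct gmap *gmap;
	unsigned long vmaddr;

	/* VM creation: one guest address space on top of the current mm. */
	gmap = gmap_alloc(current->mm);
	if (!gmap)
		return -ENOMEM;

	/* Memory slot commit: map a 1 MB aligned user range at guest_addr. */
	if (gmap_map_segment(gmap, host_addr, guest_addr, size)) {
		gmap_free(gmap);
		return -EINVAL;
	}

	/* Validity intercept: resolve a guest address (e.g. the prefix page). */
	vmaddr = gmap_fault(guest_addr, gmap);
	if (IS_ERR_VALUE(vmaddr)) {
		gmap_free(gmap);
		return -EFAULT;
	}

	/* Around guest execution: switch the guest address space in and out. */
	gmap_enable(gmap);
	/* ... run the guest via sie64a() here ... */
	gmap_disable(gmap);

	/* VM destruction. */
	gmap_free(gmap);
	return 0;
}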