Commit a2fa301f authored by Paolo Bonzini

Merge tag 'kvm-s390-20140304' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
......@@ -44,11 +44,21 @@ struct airq_iv {
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end);
static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
return airq_iv_alloc(iv, 1);
}
static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
airq_iv_free(iv, bit, 1);
}
static inline unsigned long airq_iv_end(struct airq_iv *iv)
{
return iv->end;
......
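For illustration, a minimal sketch of the extended allocator API, assuming a vector created with AIRQ_IV_ALLOC (the demo function itself is hypothetical, not part of the patch); note that the old single-bit calls survive as thin inline wrappers around the block variants:

#include <asm/airq.h>

static int airq_iv_block_demo(void)
{
	struct airq_iv *iv;
	unsigned long bit;

	/* 256-bit vector with an allocation bitmap */
	iv = airq_iv_create(256, AIRQ_IV_ALLOC);
	if (!iv)
		return -ENOMEM;
	/* grab three consecutive irq bits; returns the first bit or -1UL */
	bit = airq_iv_alloc(iv, 3);
	if (bit == -1UL) {
		airq_iv_release(iv);
		return -ENOSPC;
	}
	/* ... bits bit .. bit + 2 belong to the caller here ... */
	airq_iv_free(iv, bit, 3);	/* give all three bits back */
	airq_iv_release(iv);
	return 0;
}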
......@@ -53,6 +53,7 @@ enum interruption_class {
IRQIO_PCI,
IRQIO_MSI,
IRQIO_VIR,
IRQIO_VAI,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
......
......@@ -107,7 +107,9 @@ struct kvm_s390_sie_block {
__u64 gbea; /* 0x0180 */
__u8 reserved188[24]; /* 0x0188 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[68]; /* 0x01a4 */
__u8 reserved1a4[58]; /* 0x01a4 */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u8 reserved1f0[16]; /* 0x01f0 */
} __attribute__((packed));
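Since pp is carved out of the former reserved1a4 area at the unaligned offset 0x01de (which is why the structure stays packed), the layout can be pinned down at compile time; a hedged sketch using the standard offsetof/BUILD_BUG_ON kernel macros (the check function is hypothetical, the offsets come from the comments above):

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void sie_block_layout_check(void)
{
	/* pp replaces 8 of the 68 former reserved bytes ... */
	BUILD_BUG_ON(offsetof(struct kvm_s390_sie_block, pp) != 0x01de);
	/* ... and the field after the remaining reserved1e6 pad is unmoved */
	BUILD_BUG_ON(offsetof(struct kvm_s390_sie_block, itdba) != 0x01e8);
}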
......@@ -213,7 +215,6 @@ struct kvm_s390_float_interrupt {
int next_rr_cpu;
unsigned long idle_mask[(KVM_MAX_VCPUS + sizeof(long) - 1)
/ sizeof(long)];
struct kvm_s390_local_interrupt *local_int[KVM_MAX_VCPUS];
unsigned int irq_count;
};
......
......@@ -76,4 +76,6 @@ struct kvm_sync_regs {
#define KVM_REG_S390_PFTOKEN (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5)
#define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6)
#define KVM_REG_S390_PFSELECT (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7)
#define KVM_REG_S390_PP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8)
#define KVM_REG_S390_GBEA (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9)
#endif
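Userspace reaches the two new registers through the generic one-reg interface; a minimal sketch, assuming an open vcpu file descriptor and headers that carry the definitions above (read_pp is a hypothetical helper, not part of the patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_pp(int vcpu_fd, uint64_t *pp)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_PP,
		.addr = (uint64_t)(unsigned long)pp,	/* buffer the kernel fills */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Writing works the same way with KVM_SET_ONE_REG, and KVM_REG_S390_GBEA is accessed identically.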
......@@ -84,6 +84,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
[IRQIO_VAI] = {.name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
......
......@@ -692,6 +692,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_interrupt_info *iter;
struct kvm_vcpu *dst_vcpu = NULL;
int sigcpu;
int rc = 0;
......@@ -726,9 +727,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
sigcpu = fi->next_rr_cpu++;
if (sigcpu == KVM_MAX_VCPUS)
sigcpu = fi->next_rr_cpu = 0;
} while (fi->local_int[sigcpu] == NULL);
} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
}
li = fi->local_int[sigcpu];
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
if (waitqueue_active(li->wq))
......
......@@ -386,6 +386,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.guest_fpregs.fpc = 0;
asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
......@@ -459,11 +460,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
spin_lock_init(&vcpu->arch.local_int.lock);
INIT_LIST_HEAD(&vcpu->arch.local_int.list);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
spin_lock(&kvm->arch.float_int.lock);
kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
vcpu->arch.local_int.wq = &vcpu->wq;
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
spin_unlock(&kvm->arch.float_int.lock);
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
......@@ -571,6 +569,14 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
r = put_user(vcpu->arch.pfault_select,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PP:
r = put_user(vcpu->arch.sie_block->pp,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_GBEA:
r = put_user(vcpu->arch.sie_block->gbea,
(u64 __user *)reg->addr);
break;
default:
break;
}
......@@ -612,6 +618,14 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
r = get_user(vcpu->arch.pfault_select,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PP:
r = get_user(vcpu->arch.sie_block->pp,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_GBEA:
r = get_user(vcpu->arch.sie_block->gbea,
(u64 __user *)reg->addr);
break;
default:
break;
}
......@@ -935,7 +949,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
BUG_ON(kvm_get_vcpu(vcpu->kvm, vcpu->vcpu_id) == NULL);
switch (kvm_run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
......
......@@ -396,15 +396,10 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
int cpus = 0;
int n;
spin_lock(&fi->lock);
for (n = 0; n < KVM_MAX_VCPUS; n++)
if (fi->local_int[n])
cpus++;
spin_unlock(&fi->lock);
cpus = atomic_read(&vcpu->kvm->online_vcpus);
/* deal with other level 3 hypervisors */
if (stsi(mem, 3, 2, 2))
......
......@@ -23,29 +23,30 @@
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
u64 *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL;
int cpuflags;
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
if (fi->local_int[cpu_addr] == NULL)
rc = SIGP_CC_NOT_OPERATIONAL;
else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
& (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags);
if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else {
*reg &= 0xffffffff00000000UL;
if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
& CPUSTAT_ECALL_PEND)
if (cpuflags & CPUSTAT_ECALL_PEND)
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
& CPUSTAT_STOPPED)
if (cpuflags & CPUSTAT_STOPPED)
*reg |= SIGP_STATUS_STOPPED;
rc = SIGP_CC_STATUS_STORED;
}
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
return rc;
......@@ -53,10 +54,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
struct kvm_s390_interrupt_info *inti;
int rc;
struct kvm_vcpu *dst_vcpu = NULL;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
......@@ -68,13 +68,10 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
inti->type = KVM_S390_INT_EMERGENCY;
inti->emerg.code = vcpu->vcpu_id;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
rc = SIGP_CC_NOT_OPERATIONAL;
kfree(inti);
goto unlock;
}
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
......@@ -82,11 +79,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (waitqueue_active(li->wq))
wake_up_interruptible(li->wq);
spin_unlock_bh(&li->lock);
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
spin_unlock(&fi->lock);
return rc;
return SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
......@@ -122,10 +117,9 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
struct kvm_s390_interrupt_info *inti;
int rc;
struct kvm_vcpu *dst_vcpu = NULL;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
......@@ -137,13 +131,10 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
inti->type = KVM_S390_INT_EXTERNAL_CALL;
inti->extcall.code = vcpu->vcpu_id;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
rc = SIGP_CC_NOT_OPERATIONAL;
kfree(inti);
goto unlock;
}
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
......@@ -151,11 +142,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (waitqueue_active(li->wq))
wake_up_interruptible(li->wq);
spin_unlock_bh(&li->lock);
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
spin_unlock(&fi->lock);
return rc;
return SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
......@@ -189,31 +178,26 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL;
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
rc = SIGP_CC_NOT_OPERATIONAL;
goto unlock;
}
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
rc = __inject_sigp_stop(li, action);
unlock:
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
/* If the CPU has already been stopped, we still have
* to save the status when doing stop-and-store. This
* has to be done after unlocking all spinlocks. */
struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
rc = kvm_s390_store_status_unloaded(dst_vcpu,
KVM_S390_STORE_STATUS_NOADDR);
}
......@@ -249,12 +233,18 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
u64 *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = NULL;
struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL;
struct kvm_s390_interrupt_info *inti;
int rc;
u8 tmp;
if (cpu_addr < KVM_MAX_VCPUS)
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
/* make sure that the new value is valid memory */
address = address & 0x7fffe000u;
if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
......@@ -268,18 +258,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
if (!inti)
return SIGP_CC_BUSY;
spin_lock(&fi->lock);
if (cpu_addr < KVM_MAX_VCPUS)
li = fi->local_int[cpu_addr];
if (li == NULL) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
rc = SIGP_CC_STATUS_STORED;
kfree(inti);
goto out_fi;
}
spin_lock_bh(&li->lock);
/* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
......@@ -302,8 +280,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
spin_unlock_bh(&li->lock);
out_fi:
spin_unlock(&fi->lock);
return rc;
}
......@@ -341,28 +317,26 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
u64 *reg)
{
struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL;
int rc;
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
if (fi->local_int[cpu_addr] == NULL)
rc = SIGP_CC_NOT_OPERATIONAL;
else {
if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
& CPUSTAT_RUNNING) {
/* running */
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
} else {
/* not running */
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_NOT_RUNNING;
rc = SIGP_CC_STATUS_STORED;
}
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
/* running */
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
} else {
/* not running */
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_NOT_RUNNING;
rc = SIGP_CC_STATUS_STORED;
}
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
rc);
......@@ -373,26 +347,22 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
struct kvm_vcpu *dst_vcpu = NULL;
if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
rc = SIGP_CC_NOT_OPERATIONAL;
goto out;
}
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
rc = SIGP_CC_BUSY;
spin_unlock_bh(&li->lock);
out:
spin_unlock(&fi->lock);
return rc;
}
......
......@@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv)
EXPORT_SYMBOL(airq_iv_release);
/**
* airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
* airq_iv_alloc - allocate irq bits from an interrupt vector
* @iv: pointer to an interrupt vector structure
* @num: number of consecutive irq bits to allocate
*
* Returns the bit number of the allocated irq, or -1UL if no bit
* is available or the AIRQ_IV_ALLOC flag has not been specified
* Returns the bit number of the first irq in the allocated block of irqs,
* or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
* specified
*/
unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
unsigned long bit;
unsigned long bit, i;
if (!iv->avail)
if (!iv->avail || num == 0)
return -1UL;
spin_lock(&iv->lock);
bit = find_first_bit_inv(iv->avail, iv->bits);
if (bit < iv->bits) {
clear_bit_inv(bit, iv->avail);
if (bit >= iv->end)
iv->end = bit + 1;
} else
while (bit + num <= iv->bits) {
for (i = 1; i < num; i++)
if (!test_bit_inv(bit + i, iv->avail))
break;
if (i >= num) {
/* Found a suitable block of irqs */
for (i = 0; i < num; i++)
clear_bit_inv(bit + i, iv->avail);
if (bit + num >= iv->end)
iv->end = bit + num + 1;
break;
}
bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
}
if (bit + num > iv->bits)
bit = -1UL;
spin_unlock(&iv->lock);
return bit;
}
EXPORT_SYMBOL(airq_iv_alloc_bit);
EXPORT_SYMBOL(airq_iv_alloc);
/**
* airq_iv_free_bit - free an irq bit of an interrupt vector
* airq_iv_free - free irq bits of an interrupt vector
* @iv: pointer to interrupt vector structure
* @bit: number of the irq bit to free
* @bit: number of the first irq bit to free
* @num: number of consecutive irq bits to free
*/
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
if (!iv->avail)
unsigned long i;
if (!iv->avail || num == 0)
return;
spin_lock(&iv->lock);
/* Clear (possibly left over) interrupt bit */
clear_bit_inv(bit, iv->vector);
/* Make the bit position available again */
set_bit_inv(bit, iv->avail);
if (bit == iv->end - 1) {
for (i = 0; i < num; i++) {
/* Clear (possibly left over) interrupt bit */
clear_bit_inv(bit + i, iv->vector);
/* Make the bit positions available again */
set_bit_inv(bit + i, iv->avail);
}
if (bit + num >= iv->end) {
/* Find new end of bit-field */
while (--iv->end > 0)
if (!test_bit_inv(iv->end - 1, iv->avail))
break;
while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
iv->end--;
}
spin_unlock(&iv->lock);
}
EXPORT_SYMBOL(airq_iv_free_bit);
EXPORT_SYMBOL(airq_iv_free);
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
......
/*
* ccw based virtio transport
*
* Copyright IBM Corp. 2012
* Copyright IBM Corp. 2012, 2014
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
......@@ -32,6 +32,8 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>
/*
* virtio related functions
......@@ -58,6 +60,8 @@ struct virtio_ccw_device {
unsigned long indicators;
unsigned long indicators2;
struct vq_config_block *config_block;
bool is_thinint;
void *airq_info;
};
struct vq_info_block {
......@@ -72,15 +76,38 @@ struct virtio_feature_desc {
__u8 index;
} __packed;
struct virtio_thinint_area {
unsigned long summary_indicator;
unsigned long indicator;
u64 bit_nr;
u8 isc;
} __packed;
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
void *queue;
struct vq_info_block *info_block;
int bit_nr;
struct list_head node;
long cookie;
};
#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20
static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
u8 summary_indicator;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
......@@ -91,6 +118,7 @@ struct virtio_ccw_vq_info {
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
......@@ -102,6 +130,7 @@ struct virtio_ccw_vq_info {
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
......@@ -109,6 +138,125 @@ static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
return container_of(vdev, struct virtio_ccw_device, vdev);
}
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
unsigned long i, flags;
write_lock_irqsave(&info->lock, flags);
for (i = 0; i < airq_iv_end(info->aiv); i++) {
if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
airq_iv_free_bit(info->aiv, i);
airq_iv_set_ptr(info->aiv, i, 0);
break;
}
}
write_unlock_irqrestore(&info->lock, flags);
}
static void virtio_airq_handler(struct airq_struct *airq)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
inc_irq_stat(IRQIO_VAI);
read_lock(&info->lock);
/* Walk through indicators field, summary indicator active. */
for (ai = 0;;) {
ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
if (ai == -1UL)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
info->summary_indicator = 0;
smp_wmb();
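/* Bits that were set after the first scan but before the summary
 * indicator was cleared would not raise a fresh adapter interrupt,
 * so the vector has to be walked once more. */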
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
if (ai == -1UL)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
read_unlock(&info->lock);
}
static struct airq_info *new_airq_info(void)
{
struct airq_info *info;
int rc;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
rwlock_init(&info->lock);
info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
info->airq.lsi_ptr = &info->summary_indicator;
info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
if (rc) {
airq_iv_release(info->aiv);
kfree(info);
return NULL;
}
return info;
}
static void destroy_airq_info(struct airq_info *info)
{
if (!info)
return;
unregister_adapter_interrupt(&info->airq);
airq_iv_release(info->aiv);
kfree(info);
}
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
int i, j;
struct airq_info *info;
unsigned long indicator_addr = 0;
unsigned long bit, flags;
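/* Lazily set up indicator areas and keep probing them until one
 * has nvqs consecutive bits free. */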
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
if (!airq_areas[i])
airq_areas[i] = new_airq_info();
info = airq_areas[i];
if (!info)
return 0;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
continue;
}
*first = bit;
*airq_info = info;
indicator_addr = (unsigned long)info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);
}
return indicator_addr;
}
static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
unsigned long flags;
......@@ -145,6 +293,51 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
return ret ? ret : vcdev->err;
}
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw)
{
int ret;
unsigned long *indicatorp = NULL;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
thinint_area = kzalloc(sizeof(*thinint_area),
GFP_DMA | GFP_KERNEL);
if (!thinint_area)
return;
thinint_area->summary_indicator =
(unsigned long) &airq_info->summary_indicator;
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
indicatorp = kmalloc(sizeof(vcdev->indicators),
GFP_DMA | GFP_KERNEL);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->count = sizeof(vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
vcdev->indicators = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
VIRTIO_CCW_DOING_SET_IND_ADAPTER :
VIRTIO_CCW_DOING_SET_IND);
if (ret && (ret != -ENODEV))
dev_info(&vcdev->cdev->dev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
kfree(indicatorp);
kfree(thinint_area);
}
static inline long do_kvm_notify(struct subchannel_id schid,
unsigned long queue_index,
long cookie)
......@@ -232,11 +425,13 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
return;
virtio_ccw_drop_indicator(vcdev, ccw);
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
......@@ -326,6 +521,54 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
return ERR_PTR(err);
}
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtqueue *vqs[], int nvqs,
struct ccw1 *ccw)
{
int ret;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *info;
thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
if (!thinint_area) {
ret = -ENOMEM;
goto out;
}
/* Try to get an indicator. */
thinint_area->indicator = get_airq_indicator(vqs, nvqs,
&thinint_area->bit_nr,
&vcdev->airq_info);
if (!thinint_area->indicator) {
ret = -ENOSPC;
goto out;
}
info = vcdev->airq_info;
thinint_area->summary_indicator =
(unsigned long) &info->summary_indicator;
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long)thinint_area;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
if (ret) {
if (ret == -EOPNOTSUPP) {
/*
* The host does not support adapter interrupts
* for virtio-ccw, stop trying.
*/
virtio_ccw_use_airq = 0;
pr_info("Adapter interrupts unsupported on host\n");
} else
dev_warn(&vcdev->cdev->dev,
"enabling adapter interrupts = %d\n", ret);
virtio_ccw_drop_indicators(vcdev);
}
out:
kfree(thinint_area);
return ret;
}
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
......@@ -355,15 +598,23 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
if (!indicatorp)
goto out;
*indicatorp = (unsigned long) &vcdev->indicators;
/* Register queue indicators with host. */
vcdev->indicators = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
ccw->count = sizeof(vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
/* no error, just fall back to legacy interrupts */
vcdev->is_thinint = 0;
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
vcdev->indicators = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
ccw->count = sizeof(vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
*indicatorp = (unsigned long) &vcdev->indicators2;
vcdev->indicators2 = 0;
......@@ -636,6 +887,8 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
struct virtqueue *vq;
struct virtio_driver *drv;
if (!vcdev)
return;
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
......@@ -663,6 +916,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
case VIRTIO_CCW_DOING_SET_CONF_IND:
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
......@@ -734,23 +988,37 @@ static int virtio_ccw_probe(struct ccw_device *cdev)
return 0;
}
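/* Tear the drvdata link down under the ccwdev lock; the interrupt
 * handler is invoked under the same lock and now bails out on a NULL
 * vcdev, so it can never see a half-destroyed device. */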
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
unsigned long flags;
struct virtio_ccw_device *vcdev;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
vcdev = dev_get_drvdata(&cdev->dev);
if (!vcdev) {
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return NULL;
}
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return vcdev;
}
static void virtio_ccw_remove(struct ccw_device *cdev)
{
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
if (cdev->online) {
if (vcdev && cdev->online)
unregister_virtio_device(&vcdev->vdev);
dev_set_drvdata(&cdev->dev, NULL);
}
cdev->handler = NULL;
}
static int virtio_ccw_offline(struct ccw_device *cdev)
{
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
unregister_virtio_device(&vcdev->vdev);
dev_set_drvdata(&cdev->dev, NULL);
if (vcdev)
unregister_virtio_device(&vcdev->vdev);
return 0;
}
......@@ -759,6 +1027,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
{
int ret;
struct virtio_ccw_device *vcdev;
unsigned long flags;
vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
if (!vcdev) {
......@@ -778,6 +1047,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
goto out_free;
}
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
......@@ -786,7 +1057,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
ret = register_virtio_device(&vcdev->vdev);
......@@ -797,7 +1070,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
}
return 0;
out_put:
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
put_device(&vcdev->vdev.dev);
return ret;
out_free:
......@@ -935,6 +1210,10 @@ module_init(virtio_ccw_init);
static void __exit virtio_ccw_exit(void)
{
int i;
ccw_driver_unregister(&virtio_ccw_driver);
for (i = 0; i < MAX_AIRQ_AREAS; i++)
destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);