Commit d0b075ff authored by David Vrabel, committed by Konrad Rzeszutek Wilk

xen/events: Refactor evtchn_to_irq array to be dynamically allocated

Refactor the static evtchn_to_irq array to be dynamically allocated by
implementing get and set functions for accesses to the array.

Two new port ops are added: max_channels (maximum supported number of
event channels) and nr_channels (number of currently usable event
channels).  For the 2-level ABI, the two are the same because the shared
data structure has a fixed size. For the FIFO ABI, they will differ because
the event array is expanded dynamically.

This allows more than 65000 event channels, so an unsigned short is no
longer sufficient for an event channel port number; unsigned int is used
instead.
Signed-off-by: Malcolm Crossley <malcolm.crossley@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Parent 08385875
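Before the diff itself, here is a minimal, standalone userspace sketch of the two-level mapping scheme the patch introduces: the top level of evtchn_to_irq holds one pointer per page-sized "row", and a row is only allocated (and filled with -1) when a port in it is first bound. This is an illustration, not the kernel code: the PAGE_SIZE and MAX_CHANNELS values are assumed for the example, and malloc() stands in for get_zeroed_page().

/*
 * Standalone sketch of the two-level evtchn-to-irq mapping.
 * Assumptions for illustration only: 4 KiB pages, a 131072-port limit,
 * malloc() instead of get_zeroed_page().  Not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE      4096u
#define MAX_CHANNELS   131072u                /* assumed FIFO-ABI-sized limit */
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(int))
#define EVTCHN_ROW(e)  ((e) / EVTCHN_PER_ROW)
#define EVTCHN_COL(e)  ((e) % EVTCHN_PER_ROW)

/* Top level: one pointer per page-sized row; rows are allocated on demand. */
static int *evtchn_to_irq[MAX_CHANNELS / EVTCHN_PER_ROW];

static int set_evtchn_to_irq(unsigned evtchn, int irq)
{
        unsigned row = EVTCHN_ROW(evtchn), col;

        if (evtchn >= MAX_CHANNELS)
                return -1;

        if (evtchn_to_irq[row] == NULL) {
                if (irq == -1)          /* unallocated rows already read back as -1 */
                        return 0;
                evtchn_to_irq[row] = malloc(PAGE_SIZE);
                if (evtchn_to_irq[row] == NULL)
                        return -1;
                for (col = 0; col < EVTCHN_PER_ROW; col++)
                        evtchn_to_irq[row][col] = -1;
        }

        evtchn_to_irq[row][EVTCHN_COL(evtchn)] = irq;
        return 0;
}

static int get_evtchn_to_irq(unsigned evtchn)
{
        if (evtchn >= MAX_CHANNELS || evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
                return -1;
        return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

int main(void)
{
        /* A port above 65535 works because ports are plain unsigned ints here. */
        set_evtchn_to_irq(70000, 42);
        printf("port 70000 -> irq %d\n", get_evtchn_to_irq(70000));  /* 42 */
        printf("port 3     -> irq %d\n", get_evtchn_to_irq(3));      /* -1 */
        return 0;
}

Compiled as an ordinary C program, the sketch maps port 70000 (which would not fit in an unsigned short) to IRQ 42 and reads back -1 for an unbound port; the kernel versions in the hunks below additionally bound the port number by xen_evtchn_max_channels() and return proper errno values.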
@@ -41,6 +41,11 @@
 static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
                       cpu_evtchn_mask);

+static unsigned evtchn_2l_max_channels(void)
+{
+        return NR_EVENT_CHANNELS;
+}
+
 static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
 {
         clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
@@ -238,7 +243,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
                 /* Process port. */
                 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
-                irq = evtchn_to_irq[port];
+                irq = get_evtchn_to_irq(port);

                 if (irq != -1) {
                         desc = irq_to_desc(irq);
@@ -332,7 +337,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
                 int word_idx = i / BITS_PER_EVTCHN_WORD;
                 printk(" %d: event %d -> irq %d%s%s%s\n",
                        cpu_from_evtchn(i), i,
-                       evtchn_to_irq[i],
+                       get_evtchn_to_irq(i),
                        sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
                        ? "" : " l2-clear",
                        !sync_test_bit(i, BM(sh->evtchn_mask))
@@ -348,6 +353,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 }

 static const struct evtchn_ops evtchn_ops_2l = {
+        .max_channels      = evtchn_2l_max_channels,
+        .nr_channels       = evtchn_2l_max_channels,
         .bind_to_cpu       = evtchn_2l_bind_to_cpu,
         .clear_pending     = evtchn_2l_clear_pending,
         .set_pending       = evtchn_2l_set_pending,
...
@@ -77,12 +77,16 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

-int *evtchn_to_irq;
+int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
 static bool (*pirq_needs_eoi)(unsigned irq);

+#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
+
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)       ((chn) != 0)
@@ -92,6 +96,61 @@ static struct irq_chip xen_pirq_chip;
 static void enable_dynirq(struct irq_data *data);
 static void disable_dynirq(struct irq_data *data);

+static void clear_evtchn_to_irq_row(unsigned row)
+{
+        unsigned col;
+
+        for (col = 0; col < EVTCHN_PER_ROW; col++)
+                evtchn_to_irq[row][col] = -1;
+}
+
+static void clear_evtchn_to_irq_all(void)
+{
+        unsigned row;
+
+        for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
+                if (evtchn_to_irq[row] == NULL)
+                        continue;
+                clear_evtchn_to_irq_row(row);
+        }
+}
+
+static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+{
+        unsigned row;
+        unsigned col;
+
+        if (evtchn >= xen_evtchn_max_channels())
+                return -EINVAL;
+
+        row = EVTCHN_ROW(evtchn);
+        col = EVTCHN_COL(evtchn);
+
+        if (evtchn_to_irq[row] == NULL) {
+                /* Unallocated irq entries return -1 anyway */
+                if (irq == -1)
+                        return 0;
+
+                evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
+                if (evtchn_to_irq[row] == NULL)
+                        return -ENOMEM;
+
+                clear_evtchn_to_irq_row(row);
+        }
+
+        evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+        return 0;
+}
+
+int get_evtchn_to_irq(unsigned evtchn)
+{
+        if (evtchn >= xen_evtchn_max_channels())
+                return -1;
+        if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+                return -1;
+        return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+}
+
 /* Get info for IRQ */
 struct irq_info *info_for_irq(unsigned irq)
 {
@@ -102,9 +161,10 @@ struct irq_info *info_for_irq(unsigned irq)
 static int xen_irq_info_common_setup(struct irq_info *info,
                                      unsigned irq,
                                      enum xen_irq_type type,
-                                     unsigned short evtchn,
+                                     unsigned evtchn,
                                      unsigned short cpu)
 {
+        int ret;

         BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
@@ -113,7 +173,9 @@ static int xen_irq_info_common_setup(struct irq_info *info,
         info->evtchn = evtchn;
         info->cpu = cpu;

-        evtchn_to_irq[evtchn] = irq;
+        ret = set_evtchn_to_irq(evtchn, irq);
+        if (ret < 0)
+                return ret;

         irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
@@ -121,7 +183,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
 }

 static int xen_irq_info_evtchn_setup(unsigned irq,
-                                     unsigned short evtchn)
+                                     unsigned evtchn)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -130,7 +192,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
 static int xen_irq_info_ipi_setup(unsigned cpu,
                                   unsigned irq,
-                                  unsigned short evtchn,
+                                  unsigned evtchn,
                                   enum ipi_vector ipi)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -144,8 +206,8 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
 static int xen_irq_info_virq_setup(unsigned cpu,
                                    unsigned irq,
-                                   unsigned short evtchn,
-                                   unsigned short virq)
+                                   unsigned evtchn,
+                                   unsigned virq)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -157,9 +219,9 @@ static int xen_irq_info_virq_setup(unsigned cpu,
 }

 static int xen_irq_info_pirq_setup(unsigned irq,
-                                   unsigned short evtchn,
-                                   unsigned short pirq,
-                                   unsigned short gsi,
+                                   unsigned evtchn,
+                                   unsigned pirq,
+                                   unsigned gsi,
                                    uint16_t domid,
                                    unsigned char flags)
 {
@@ -173,6 +235,12 @@ static int xen_irq_info_pirq_setup(unsigned irq,
         return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
 }

+static void xen_irq_info_cleanup(struct irq_info *info)
+{
+        set_evtchn_to_irq(info->evtchn, -1);
+        info->evtchn = 0;
+}
+
 /*
  * Accessors for packed IRQ information.
  */
@@ -186,7 +254,7 @@ unsigned int evtchn_from_irq(unsigned irq)
 unsigned irq_from_evtchn(unsigned int evtchn)
 {
-        return evtchn_to_irq[evtchn];
+        return get_evtchn_to_irq(evtchn);
 }
 EXPORT_SYMBOL_GPL(irq_from_evtchn);
@@ -237,7 +305,7 @@ unsigned cpu_from_irq(unsigned irq)
 unsigned int cpu_from_evtchn(unsigned int evtchn)
 {
-        int irq = evtchn_to_irq[evtchn];
+        int irq = get_evtchn_to_irq(evtchn);
         unsigned ret = 0;

         if (irq != -1)
@@ -263,7 +331,7 @@ static bool pirq_needs_eoi_flag(unsigned irq)
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
-        int irq = evtchn_to_irq[chn];
+        int irq = get_evtchn_to_irq(chn);
         struct irq_info *info = info_for_irq(irq);

         BUG_ON(irq == -1);
@@ -386,6 +454,18 @@ static void xen_free_irq(unsigned irq)
         irq_free_desc(irq);
 }

+static void xen_evtchn_close(unsigned int port)
+{
+        struct evtchn_close close;
+
+        close.port = port;
+        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+                BUG();
+
+        /* Closed ports are implicitly re-bound to VCPU0. */
+        bind_evtchn_to_cpu(port, 0);
+}
+
 static void pirq_query_unmask(int irq)
 {
         struct physdev_irq_status_query irq_status;
@@ -458,7 +538,13 @@ static unsigned int __startup_pirq(unsigned int irq)
         pirq_query_unmask(irq);

-        evtchn_to_irq[evtchn] = irq;
+        rc = set_evtchn_to_irq(evtchn, irq);
+        if (rc != 0) {
+                pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
+                       irq, rc);
+                xen_evtchn_close(evtchn);
+                return 0;
+        }
         bind_evtchn_to_cpu(evtchn, 0);
         info->evtchn = evtchn;
@@ -476,10 +562,9 @@ static unsigned int startup_pirq(struct irq_data *data)
 static void shutdown_pirq(struct irq_data *data)
 {
-        struct evtchn_close close;
         unsigned int irq = data->irq;
         struct irq_info *info = info_for_irq(irq);
-        int evtchn = evtchn_from_irq(irq);
+        unsigned evtchn = evtchn_from_irq(irq);

         BUG_ON(info->type != IRQT_PIRQ);
@@ -487,14 +572,8 @@ static void shutdown_pirq(struct irq_data *data)
                 return;

         mask_evtchn(evtchn);
-
-        close.port = evtchn;
-        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-                BUG();
-
-        bind_evtchn_to_cpu(evtchn, 0);
-        evtchn_to_irq[evtchn] = -1;
-        info->evtchn = 0;
+        xen_evtchn_close(evtchn);
+        xen_irq_info_cleanup(info);
 }

 static void enable_pirq(struct irq_data *data)
@@ -525,7 +604,6 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 static void __unbind_from_irq(unsigned int irq)
 {
-        struct evtchn_close close;
         int evtchn = evtchn_from_irq(irq);
         struct irq_info *info = irq_get_handler_data(irq);
@@ -536,27 +614,22 @@ static void __unbind_from_irq(unsigned int irq)
         }

         if (VALID_EVTCHN(evtchn)) {
-                close.port = evtchn;
-                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-                        BUG();
+                unsigned int cpu = cpu_from_irq(irq);
+
+                xen_evtchn_close(evtchn);

                 switch (type_from_irq(irq)) {
                 case IRQT_VIRQ:
-                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-                                [virq_from_irq(irq)] = -1;
+                        per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
                         break;
                 case IRQT_IPI:
-                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-                                [ipi_from_irq(irq)] = -1;
+                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
                         break;
                 default:
                         break;
                 }

-                /* Closed ports are implicitly re-bound to VCPU0. */
-                bind_evtchn_to_cpu(evtchn, 0);
-
-                evtchn_to_irq[evtchn] = -1;
+                xen_irq_info_cleanup(info);
         }

         BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
@@ -760,9 +833,12 @@ int bind_evtchn_to_irq(unsigned int evtchn)
         int irq;
         int ret;

+        if (evtchn >= xen_evtchn_max_channels())
+                return -ENOMEM;
+
         mutex_lock(&irq_mapping_update_lock);

-        irq = evtchn_to_irq[evtchn];
+        irq = get_evtchn_to_irq(evtchn);
         if (irq == -1) {
                 irq = xen_allocate_irq_dynamic();
@@ -852,7 +928,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
         int port, rc = -ENOENT;

         memset(&status, 0, sizeof(status));
-        for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+        for (port = 0; port < xen_evtchn_max_channels(); port++) {
                 status.dom = DOMID_SELF;
                 status.port = port;
                 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
@@ -1022,7 +1098,7 @@ EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 int evtchn_make_refcounted(unsigned int evtchn)
 {
-        int irq = evtchn_to_irq[evtchn];
+        int irq = get_evtchn_to_irq(evtchn);
         struct irq_info *info;

         if (irq == -1)
@@ -1047,12 +1123,12 @@ int evtchn_get(unsigned int evtchn)
         struct irq_info *info;
         int err = -ENOENT;

-        if (evtchn >= NR_EVENT_CHANNELS)
+        if (evtchn >= xen_evtchn_max_channels())
                 return -EINVAL;

         mutex_lock(&irq_mapping_update_lock);

-        irq = evtchn_to_irq[evtchn];
+        irq = get_evtchn_to_irq(evtchn);
         if (irq == -1)
                 goto done;
@@ -1076,7 +1152,7 @@ EXPORT_SYMBOL_GPL(evtchn_get);
 void evtchn_put(unsigned int evtchn)
 {
-        int irq = evtchn_to_irq[evtchn];
+        int irq = get_evtchn_to_irq(evtchn);

         if (WARN_ON(irq == -1))
                 return;
         unbind_from_irq(irq);
@@ -1163,7 +1239,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
         mutex_lock(&irq_mapping_update_lock);

         /* After resume the irq<->evtchn mappings are all cleared out */
-        BUG_ON(evtchn_to_irq[evtchn] != -1);
+        BUG_ON(get_evtchn_to_irq(evtchn) != -1);
         /* Expect irq to have been bound before,
            so there should be a proper type */
         BUG_ON(info->type == IRQT_UNBOUND);
@@ -1448,15 +1524,14 @@ void xen_irq_resume(void)
         struct irq_info *info;

         /* New event-channel space is not 'live' yet. */
-        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+        for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
                 mask_evtchn(evtchn);

         /* No IRQ <-> event-channel mappings. */
         list_for_each_entry(info, &xen_irq_list_head, list)
                 info->evtchn = 0; /* zap event-channel binding */

-        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-                evtchn_to_irq[evtchn] = -1;
+        clear_evtchn_to_irq_all();

         for_each_possible_cpu(cpu) {
                 restore_cpu_virqs(cpu);
@@ -1553,14 +1628,12 @@ void __init xen_init_IRQ(void)
         xen_evtchn_2l_init();

-        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
-                                GFP_KERNEL);
+        evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
+                                sizeof(*evtchn_to_irq), GFP_KERNEL);
         BUG_ON(!evtchn_to_irq);
-        for (i = 0; i < NR_EVENT_CHANNELS; i++)
-                evtchn_to_irq[i] = -1;

         /* No event channels are 'live' right now. */
-        for (i = 0; i < NR_EVENT_CHANNELS; i++)
+        for (i = 0; i < xen_evtchn_nr_channels(); i++)
                 mask_evtchn(i);

         pirq_needs_eoi = pirq_needs_eoi_flag;
...
@@ -35,7 +35,7 @@ struct irq_info {
         int refcnt;
         enum xen_irq_type type; /* type */
         unsigned irq;
-        unsigned short evtchn;  /* event channel */
+        unsigned int evtchn;    /* event channel */
         unsigned short cpu;     /* cpu bound */

         union {
@@ -55,6 +55,9 @@ struct irq_info {
 #define PIRQ_SHAREABLE  (1 << 1)

 struct evtchn_ops {
+        unsigned (*max_channels)(void);
+        unsigned (*nr_channels)(void);
+
         int (*setup)(struct irq_info *info);
         void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
@@ -70,12 +73,23 @@ struct evtchn_ops {
 extern const struct evtchn_ops *evtchn_ops;

-extern int *evtchn_to_irq;
+extern int **evtchn_to_irq;
+int get_evtchn_to_irq(unsigned int evtchn);

 struct irq_info *info_for_irq(unsigned irq);
 unsigned cpu_from_irq(unsigned irq);
 unsigned cpu_from_evtchn(unsigned int evtchn);

+static inline unsigned xen_evtchn_max_channels(void)
+{
+        return evtchn_ops->max_channels();
+}
+
+static inline unsigned xen_evtchn_nr_channels(void)
+{
+        return evtchn_ops->nr_channels();
+}
+
 /*
  * Do any ABI specific setup for a bound event channel before it can
  * be unmasked and used.
...