Commit 9f01ec53 authored by K. Y. Srinivasan, committed by Greg Kroah-Hartman

Drivers: hv: vmbus: Improve the CPU affiliation for channels

The current code tracks the assigned CPUs within a NUMA node in the context of
the primary channel. So, if we have a VM with a single NUMA node with 8 VCPUs, we may
end up unevenly distributing the channel load. Fix the issue by tracking affiliations
globally.
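
The round-robin selection this patch implements is easiest to see in isolation. Below is a minimal user-space C sketch, not the kernel code itself: it models the CPU set of one 8-VCPU NUMA node as a bitmask, keeps a single global allocation mask for the node (standing in for hv_context.hv_numa_map[node]), and mirrors the patch's reset/xor/next steps. The names NODE_CPUS and pick_cpu are invented for the illustration.

```c
/*
 * User-space simulation of the patch's per-node CPU assignment.
 * NODE_CPUS and pick_cpu() are illustrative names only.
 */
#include <stdio.h>

#define NODE_CPUS 8u                    /* VCPUs in the single NUMA node */
#define NODE_MASK ((1u << NODE_CPUS) - 1u)

/* Global per-node state, as hv_context.hv_numa_map[node] is after the patch. */
static unsigned int alloced_mask;

static int pick_cpu(void)
{
	unsigned int available;
	int cpu;

	/* All CPUs in the node used: reset the alloced map (cpumask_clear). */
	if (alloced_mask == NODE_MASK)
		alloced_mask = 0;

	/* CPUs in the node not yet alloced (the cpumask_xor step). */
	available = NODE_MASK & ~alloced_mask;

	/* Take the lowest available CPU (the cpumask_next(-1, ...) step). */
	for (cpu = 0; cpu < (int)NODE_CPUS; cpu++) {
		if (available & (1u << cpu)) {
			alloced_mask |= 1u << cpu;
			return cpu;
		}
	}
	return -1;	/* unreachable while NODE_CPUS > 0 */
}

int main(void)
{
	int ch;

	/* 16 channels over 8 CPUs: each CPU ends up with exactly two. */
	for (ch = 0; ch < 16; ch++)
		printf("channel %2d -> cpu %d\n", ch, pick_cpu());
	return 0;
}
```

Because the mask is global per node rather than stored in each primary channel, channels created by different devices draw from the same pool, so a second device's channels continue where the first device's stopped instead of both starting again at the node's first CPUs.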
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 35464483
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -392,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
+	struct cpumask *alloced_mask;
 
 	for (i = IDE; i < MAX_PERF_CHN; i++) {
 		if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -409,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
 		 * channel, bind it to cpu 0.
 		 */
 		channel->numa_node = 0;
-		cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
 		channel->target_cpu = 0;
 		channel->target_vp = hv_context.vp_index[0];
 		return;
@@ -434,21 +434,22 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
 		channel->numa_node = next_node;
 		primary = channel;
 	}
+	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
 
-	if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+	if (cpumask_weight(alloced_mask) ==
 	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the alloced map.
 		 */
-		cpumask_clear(&primary->alloced_cpus_in_node);
+		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+	cpumask_xor(&available_mask, alloced_mask,
 		    cpumask_of_node(primary->numa_node));
 
 	cur_cpu = cpumask_next(-1, &available_mask);
-	cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+	cpumask_set_cpu(cur_cpu, alloced_mask);
 
 	channel->target_cpu = cur_cpu;
 	channel->target_vp = hv_context.vp_index[cur_cpu];
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -332,6 +332,13 @@ int hv_synic_alloc(void)
 	size_t ced_size = sizeof(struct clock_event_device);
 	int cpu;
 
+	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+					 GFP_ATOMIC);
+	if (hv_context.hv_numa_map == NULL) {
+		pr_err("Unable to allocate NUMA map\n");
+		goto err;
+	}
+
 	for_each_online_cpu(cpu) {
 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
 		if (hv_context.event_dpc[cpu] == NULL) {
@@ -345,6 +352,7 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate clock event device\n");
 			goto err;
 		}
+
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
 		hv_context.synic_message_page[cpu] =
@@ -393,6 +401,7 @@ void hv_synic_free(void)
 {
 	int cpu;
 
+	kfree(hv_context.hv_numa_map);
 	for_each_online_cpu(cpu)
 		hv_synic_free_cpu(cpu);
 }
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -551,6 +551,11 @@ struct hv_context {
 	 * Support PV clockevent device.
 	 */
 	struct clock_event_device *clk_evt[NR_CPUS];
+	/*
+	 * To manage allocations in a NUMA node.
+	 * Array indexed by numa node ID.
+	 */
+	struct cpumask *hv_numa_map;
 };
 
 extern struct hv_context hv_context;
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -699,7 +699,6 @@ struct vmbus_channel {
 	/*
 	 * State to manage the CPU affiliation of channels.
 	 */
-	struct cpumask alloced_cpus_in_node;
 	int numa_node;
 	/*
 	 * Support for sub-channels. For high performance devices,