Commit d3ba720d authored by K. Y. Srinivasan, committed by Greg Kroah-Hartman

Drivers: hv: Eliminate the channel spinlock in the callback path

By ensuring that we set the callback handler to NULL in the channel close
path on the same CPU that the channel is bound to, we can eliminate the lock
acquisition and release on a performance-critical path.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent e4d8270e
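The heart of the change is replacing the spinlock with a synchronous cross-CPU function call. A minimal sketch of the pattern follows (the helper run_on_cpu is hypothetical, not part of the patch): run a function on the CPU a channel is bound to, skipping the IPI when the caller is already there. With wait == true, smp_call_function_single() does not return until the target CPU has executed the function, so the caller can rely on its side effects afterwards.

#include <linux/smp.h>

/* Hypothetical helper illustrating the commit's pattern: execute
 * fn(arg) on a specific CPU.  wait == true makes the call synchronous,
 * so fn's side effects are visible to the caller on return.
 */
static void run_on_cpu(int cpu, smp_call_func_t fn, void *arg)
{
	int this_cpu = get_cpu();	/* disable preemption while comparing */

	if (cpu != this_cpu)
		smp_call_function_single(cpu, fn, arg, true);
	else
		fn(arg);		/* already on the right CPU */
	put_cpu();
}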
@@ -471,18 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
 
+static void reset_channel_cb(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+
+	channel->onchannel_callback = NULL;
+}
+
 static void vmbus_close_internal(struct vmbus_channel *channel)
 {
 	struct vmbus_channel_close_channel *msg;
 	int ret;
-	unsigned long flags;
 
 	channel->state = CHANNEL_OPEN_STATE;
 	channel->sc_creation_callback = NULL;
 	/* Stop callback and cancel the timer asap */
-	spin_lock_irqsave(&channel->inbound_lock, flags);
-	channel->onchannel_callback = NULL;
-	spin_unlock_irqrestore(&channel->inbound_lock, flags);
+	if (channel->target_cpu != smp_processor_id())
+		smp_call_function_single(channel->target_cpu, reset_channel_cb,
+					 channel, true);
+	else
+		reset_channel_cb(channel);
 
 	/* Send a closing message */
...
@@ -365,7 +365,7 @@ static u32 next_vp;
  * performance critical channels (IDE, SCSI and Network) will be uniformly
  * distributed across all available CPUs.
  */
-static u32 get_vp_index(uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid)
 {
 	u32 cur_cpu;
 	int i;
@@ -387,10 +387,13 @@ static u32 get_vp_index(uuid_le *type_guid)
 		 * Also if the channel is not a performance critical
 		 * channel, bind it to cpu 0.
 		 */
-		return 0;
+		channel->target_cpu = 0;
+		channel->target_vp = 0;
+		return;
 	}
 	cur_cpu = (++next_vp % max_cpus);
-	return hv_context.vp_index[cur_cpu];
+	channel->target_cpu = cur_cpu;
+	channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
 /*
@@ -438,7 +441,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 			offer->connection_id;
 	}
 
-	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+	init_vp_index(newchannel, &offer->offer.if_type);
 
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
...
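A worked example of the round-robin above: next_vp is a zero-initialized static, so with max_cpus == 4, successive performance-critical offers see ++next_vp yield 1, 2, 3, 4, ..., and target_cpu cycles through 1, 2, 3, 0, 1, ...; non-critical channels are always bound to CPU 0.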
@@ -277,7 +277,6 @@ struct vmbus_channel *relid2channel(u32 relid)
 static void process_chn_event(u32 relid)
 {
 	struct vmbus_channel *channel;
-	unsigned long flags;
 	void *arg;
 	bool read_state;
 	u32 bytes_to_read;
@@ -296,13 +295,12 @@ static void process_chn_event(u32 relid)
 	/*
 	 * A channel once created is persistent even when there
 	 * is no driver handling the device. An unloading driver
-	 * sets the onchannel_callback to NULL under the
-	 * protection of the channel inbound_lock. Thus, checking
-	 * and invoking the driver specific callback takes care of
-	 * orderly unloading of the driver.
+	 * sets the onchannel_callback to NULL on the same CPU
+	 * as where this interrupt is handled (in an interrupt context).
+	 * Thus, checking and invoking the driver specific callback takes
+	 * care of orderly unloading of the driver.
 	 */
 
-	spin_lock_irqsave(&channel->inbound_lock, flags);
 	if (channel->onchannel_callback != NULL) {
 		arg = channel->channel_callback_context;
 		read_state = channel->batched_reading;
@@ -327,7 +325,6 @@ static void process_chn_event(u32 relid)
 		pr_err("no channel callback for relid - %u\n", relid);
 	}
 
-	spin_unlock_irqrestore(&channel->inbound_lock, flags);
 }
 
 /*
...
@@ -696,6 +696,8 @@ struct vmbus_channel {
 	 * preserve the earlier behavior.
 	 */
 	u32 target_vp;
+	/* The corresponding CPUID in the guest */
+	u32 target_cpu;
 	/*
 	 * Support for sub-channels. For high performance devices,
 	 * it will be useful to have multiple sub-channels to support
...
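Why dropping the lock in process_chn_event() is safe: events for a given channel are always handled on channel->target_cpu, in interrupt context, and smp_call_function_single() with wait == true runs reset_channel_cb() in interrupt context on that same CPU. The NULL store therefore cannot interleave with a callback invocation, so the inbound_lock no longer has anything to protect on this path.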