Commit 6e47dd3e authored by Stephen Hemminger, committed by Greg Kroah-Hartman

vmbus: expose hv_begin/end_read

In order to implement NAPI in netvsc, the driver needs to be able to
control the host interrupt mask.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 5529eaf6
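For context, here is a minimal, purely illustrative sketch of how a NAPI poll callback could use the hv_begin_read()/hv_end_read() pair exposed by this change: mask host notifications while draining the inbound ring, then unmask and re-poll if data raced in. The struct my_dev layout, the my_vmbus_poll() name, and the process_one_packet() helper are hypothetical placeholders, not part of this commit or of the netvsc driver.

/*
 * Illustrative sketch only (not from this commit): a possible NAPI poll
 * callback built on hv_begin_read()/hv_end_read().
 */
#include <linux/hyperv.h>
#include <linux/netdevice.h>

/* Hypothetical per-channel state; real drivers keep their own layout. */
struct my_dev {
	struct vmbus_channel *channel;	/* inbound ring buffer lives here */
	struct napi_struct napi;
};

/* Placeholder for driver-specific receive work; returns true while packets remain. */
static bool process_one_packet(struct my_dev *dev)
{
	return false;	/* a real driver would drain one packet here */
}

static int my_vmbus_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, napi);
	struct hv_ring_buffer_info *rbi = &dev->channel->inbound;
	int work_done = 0;

	/* Mask host interrupt callbacks while NAPI owns the ring. */
	hv_begin_read(rbi);

	while (work_done < budget && process_one_packet(dev))
		work_done++;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/*
		 * Unmask.  A non-zero return means data arrived while the
		 * mask was set, so schedule another poll rather than lose
		 * the notification.
		 */
		if (hv_end_read(rbi))
			napi_reschedule(napi);
	}

	return work_done;
}

The non-zero return from hv_end_read() is what closes the race window described in its comment: data that arrives while the mask is set never generates a host callback, so the guest must recheck the ring after unmasking.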
@@ -292,10 +292,6 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info);
-void hv_begin_read(struct hv_ring_buffer_info *rbi);
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi);
/*
 * Maximum channels is determined by the size of the interrupt page
 * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
......
@@ -32,26 +32,6 @@
#include "hyperv_vmbus.h"
-void hv_begin_read(struct hv_ring_buffer_info *rbi)
-{
-	rbi->ring_buffer->interrupt_mask = 1;
-	virt_mb();
-}
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi)
-{
-	rbi->ring_buffer->interrupt_mask = 0;
-	virt_mb();
-
-	/*
-	 * Now check to see if the ring buffer is still empty.
-	 * If it is not, we raced and we need to process new
-	 * incoming messages.
-	 */
-	return hv_get_bytes_to_read(rbi);
-}
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here is the details of this protocol:
......
@@ -1512,6 +1512,36 @@ init_cached_read_index(struct vmbus_channel *channel)
	rbi->cached_read_index = rbi->ring_buffer->read_index;
}
+/*
+ * Mask off host interrupt callback notifications
+ */
+static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+	rbi->ring_buffer->interrupt_mask = 1;
+
+	/* make sure mask update is not reordered */
+	virt_mb();
+}
+
+/*
+ * Re-enable host callback and return number of outstanding bytes
+ */
+static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+	rbi->ring_buffer->interrupt_mask = 0;
+
+	/* make sure mask update is not reordered */
+	virt_mb();
+
+	/*
+	 * Now check to see if the ring buffer is still empty.
+	 * If it is not, we raced and we need to process new
+	 * incoming messages.
+	 */
+	return hv_get_bytes_to_read(rbi);
+}
+
/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
......