提交 b7502279 编写于 作者: Eli Cohen 提交者: Pengyuan Zhao

net/vdpa: Use readers/writers semaphore instead of cf_mutex

mainline inclusion
from mainline-v5.19-rc1
commit a6a51adc
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5WXCZ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a6a51adc6e8aafebfe0c4beb80e99694ea562b40

----------------------------------------------------------------------

Replace cf_mutex with rw_semaphore to reflect the fact that some calls
could be called concurrently but can suffice with read lock.
Suggested-by: Si-Wei Liu <si-wei.liu@oracle.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-5-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Pengyuan Zhao <zhaopengyuan@hisilicon.com>
上级 a48d7bea
...@@ -23,9 +23,9 @@ static DEFINE_IDA(vdpa_index_ida); ...@@ -23,9 +23,9 @@ static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status) void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{ {
mutex_lock(&vdev->cf_mutex); down_write(&vdev->cf_lock);
vdev->config->set_status(vdev, status); vdev->config->set_status(vdev, status);
mutex_unlock(&vdev->cf_mutex); up_write(&vdev->cf_lock);
} }
EXPORT_SYMBOL(vdpa_set_status); EXPORT_SYMBOL(vdpa_set_status);
...@@ -151,7 +151,6 @@ static void vdpa_release_dev(struct device *d) ...@@ -151,7 +151,6 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev); ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index); ida_simple_remove(&vdpa_index_ida, vdev->index);
mutex_destroy(&vdev->cf_mutex);
kfree(vdev->driver_override); kfree(vdev->driver_override);
kfree(vdev); kfree(vdev);
} }
...@@ -214,7 +213,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, ...@@ -214,7 +213,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (err) if (err)
goto err_name; goto err_name;
mutex_init(&vdev->cf_mutex); init_rwsem(&vdev->cf_lock);
device_initialize(&vdev->dev); device_initialize(&vdev->dev);
return vdev; return vdev;
...@@ -410,9 +409,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev, ...@@ -410,9 +409,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len) void *buf, unsigned int len)
{ {
mutex_lock(&vdev->cf_mutex); down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len); vdpa_get_config_unlocked(vdev, offset, buf, len);
mutex_unlock(&vdev->cf_mutex); up_read(&vdev->cf_lock);
} }
EXPORT_SYMBOL_GPL(vdpa_get_config); EXPORT_SYMBOL_GPL(vdpa_get_config);
...@@ -426,9 +425,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config); ...@@ -426,9 +425,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int length) const void *buf, unsigned int length)
{ {
mutex_lock(&vdev->cf_mutex); down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length); vdev->config->set_config(vdev, offset, buf, length);
mutex_unlock(&vdev->cf_mutex); up_write(&vdev->cf_lock);
} }
EXPORT_SYMBOL_GPL(vdpa_set_config); EXPORT_SYMBOL_GPL(vdpa_set_config);
...@@ -869,7 +868,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, ...@@ -869,7 +868,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
u8 status; u8 status;
int err; int err;
mutex_lock(&vdev->cf_mutex); down_read(&vdev->cf_lock);
status = vdev->config->get_status(vdev); status = vdev->config->get_status(vdev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed"); NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
...@@ -906,14 +905,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, ...@@ -906,14 +905,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err) if (err)
goto msg_err; goto msg_err;
mutex_unlock(&vdev->cf_mutex); up_read(&vdev->cf_lock);
genlmsg_end(msg, hdr); genlmsg_end(msg, hdr);
return 0; return 0;
msg_err: msg_err:
genlmsg_cancel(msg, hdr); genlmsg_cancel(msg, hdr);
out: out:
mutex_unlock(&vdev->cf_mutex); up_read(&vdev->cf_lock);
return err; return err;
} }
...@@ -957,7 +956,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, ...@@ -957,7 +956,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
{ {
int err; int err;
mutex_lock(&vdev->cf_mutex); down_read(&vdev->cf_lock);
if (!vdev->config->get_vendor_vq_stats) { if (!vdev->config->get_vendor_vq_stats) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto out; goto out;
...@@ -965,7 +964,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, ...@@ -965,7 +964,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
err = vdpa_fill_stats_rec(vdev, msg, info, index); err = vdpa_fill_stats_rec(vdev, msg, info, index);
out: out:
mutex_unlock(&vdev->cf_mutex); up_read(&vdev->cf_lock);
return err; return err;
} }
......
...@@ -66,7 +66,7 @@ struct vdpa_mgmt_dev; ...@@ -66,7 +66,7 @@ struct vdpa_mgmt_dev;
* @dma_dev: the actual device that is performing DMA * @dma_dev: the actual device that is performing DMA
* @driver_override: driver name to force a match * @driver_override: driver name to force a match
* @config: the configuration ops for this device. * @config: the configuration ops for this device.
* @cf_mutex: Protects get and set access to configuration layout. * @cf_lock: Protects get and set access to configuration layout.
* @index: device index * @index: device index
* @features_valid: were features initialized? for legacy guests * @features_valid: were features initialized? for legacy guests
* @use_va: indicate whether virtual address must be used by this device * @use_va: indicate whether virtual address must be used by this device
...@@ -79,7 +79,7 @@ struct vdpa_device { ...@@ -79,7 +79,7 @@ struct vdpa_device {
struct device *dma_dev; struct device *dma_dev;
const char *driver_override; const char *driver_override;
const struct vdpa_config_ops *config; const struct vdpa_config_ops *config;
struct mutex cf_mutex; /* Protects get/set config */ struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index; unsigned int index;
bool features_valid; bool features_valid;
bool use_va; bool use_va;
...@@ -398,10 +398,10 @@ static inline int vdpa_reset(struct vdpa_device *vdev) ...@@ -398,10 +398,10 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
const struct vdpa_config_ops *ops = vdev->config; const struct vdpa_config_ops *ops = vdev->config;
int ret; int ret;
mutex_lock(&vdev->cf_mutex); down_write(&vdev->cf_lock);
vdev->features_valid = false; vdev->features_valid = false;
ret = ops->reset(vdev); ret = ops->reset(vdev);
mutex_unlock(&vdev->cf_mutex); up_write(&vdev->cf_lock);
return ret; return ret;
} }
...@@ -420,9 +420,9 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) ...@@ -420,9 +420,9 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{ {
int ret; int ret;
mutex_lock(&vdev->cf_mutex); down_write(&vdev->cf_lock);
ret = vdpa_set_features_unlocked(vdev, features); ret = vdpa_set_features_unlocked(vdev, features);
mutex_unlock(&vdev->cf_mutex); up_write(&vdev->cf_lock);
return ret; return ret;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册