提交 530a5678 编写于 作者: J Jason Wang 提交者: Michael S. Tsirkin

vdpa: support packed virtqueue for set/get_vq_state()

This patch extends the vdpa_vq_state to support packed virtqueue
state which is basically the device/driver ring wrap counters and the
avail and used index. This will be used for the virtio-vdpa support
for the packed virtqueue and the future vhost/vhost-vdpa support for
the packed virtqueue.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20210602021536.39525-2-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Eli Cohen <elic@nvidia.com>
上级 72b5e895
...@@ -264,7 +264,7 @@ static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, ...@@ -264,7 +264,7 @@ static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
{ {
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
state->avail_index = ifcvf_get_vq_state(vf, qid); state->split.avail_index = ifcvf_get_vq_state(vf, qid);
return 0; return 0;
} }
...@@ -273,7 +273,7 @@ static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, ...@@ -273,7 +273,7 @@ static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
{ {
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_set_vq_state(vf, qid, state->avail_index); return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
} }
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
......
...@@ -1423,8 +1423,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, ...@@ -1423,8 +1423,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL; return -EINVAL;
} }
mvq->used_idx = state->avail_index; mvq->used_idx = state->split.avail_index;
mvq->avail_idx = state->avail_index; mvq->avail_idx = state->split.avail_index;
return 0; return 0;
} }
...@@ -1445,7 +1445,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa ...@@ -1445,7 +1445,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* Since both values should be identical, we take the value of * Since both values should be identical, we take the value of
* used_idx which is reported correctly. * used_idx which is reported correctly.
*/ */
state->avail_index = mvq->used_idx; state->split.avail_index = mvq->used_idx;
return 0; return 0;
} }
...@@ -1454,7 +1454,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa ...@@ -1454,7 +1454,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err; return err;
} }
state->avail_index = attr.used_index; state->split.avail_index = attr.used_index;
return 0; return 0;
} }
......
...@@ -374,7 +374,7 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, ...@@ -374,7 +374,7 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vringh *vrh = &vq->vring; struct vringh *vrh = &vq->vring;
spin_lock(&vdpasim->lock); spin_lock(&vdpasim->lock);
vrh->last_avail_idx = state->avail_index; vrh->last_avail_idx = state->split.avail_index;
spin_unlock(&vdpasim->lock); spin_unlock(&vdpasim->lock);
return 0; return 0;
...@@ -387,7 +387,7 @@ static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, ...@@ -387,7 +387,7 @@ static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring; struct vringh *vrh = &vq->vring;
state->avail_index = vrh->last_avail_idx; state->split.avail_index = vrh->last_avail_idx;
return 0; return 0;
} }
......
...@@ -383,7 +383,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ...@@ -383,7 +383,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r) if (r)
return r; return r;
vq->last_avail_idx = vq_state.avail_index; vq->last_avail_idx = vq_state.split.avail_index;
break; break;
} }
...@@ -401,7 +401,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ...@@ -401,7 +401,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break; break;
case VHOST_SET_VRING_BASE: case VHOST_SET_VRING_BASE:
vq_state.avail_index = vq->last_avail_idx; vq_state.split.avail_index = vq->last_avail_idx;
if (ops->set_vq_state(vdpa, idx, &vq_state)) if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL; r = -EINVAL;
break; break;
......
...@@ -28,13 +28,34 @@ struct vdpa_notification_area { ...@@ -28,13 +28,34 @@ struct vdpa_notification_area {
}; };
/** /**
* struct vdpa_vq_state - vDPA vq_state definition * struct vdpa_vq_state_split - vDPA split virtqueue state
* @avail_index: available index * @avail_index: available index
*/ */
struct vdpa_vq_state { struct vdpa_vq_state_split {
u16 avail_index; u16 avail_index;
}; };
/**
 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
 * @last_avail_counter: last driver ring wrap counter observed by device
 * @last_avail_idx: device available index
 * @last_used_counter: device ring wrap counter
 * @last_used_idx: used index
 *
 * Each counter/index pair packs into a single 16-bit value: a 1-bit
 * wrap counter plus a 15-bit ring index, matching the packed virtqueue
 * event-suppression layout in the virtio spec.
 */
struct vdpa_vq_state_packed {
u16 last_avail_counter:1;
u16 last_avail_idx:15;
u16 last_used_counter:1;
u16 last_used_idx:15;
};
/**
 * struct vdpa_vq_state - vDPA virtqueue state
 * @split: state for a split virtqueue
 * @packed: state for a packed virtqueue
 *
 * Anonymous union: only the member matching the negotiated virtqueue
 * format (split vs packed) is meaningful at any time.
 */
struct vdpa_vq_state {
union {
struct vdpa_vq_state_split split;
struct vdpa_vq_state_packed packed;
};
};
struct vdpa_mgmt_dev; struct vdpa_mgmt_dev;
/** /**
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册