path: root/drivers/virtio/virtio_pci_modern.c
author	Jiri Pirko <jiri@nvidia.com>	2024-07-16 13:35:51 +0200
committer	Michael S. Tsirkin <mst@redhat.com>	2024-07-17 05:43:21 -0400
commit	4c3b54af907e709609d3d8beca92d65e2f0cfd83 (patch)
tree	65bb3e6643a56a0d09c41abe27adbbd568ab1afe /drivers/virtio/virtio_pci_modern.c
parent	7090f2b5ad33e9b6ba68eb927b02e8a286d21fb4 (diff)
virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
Currently, the code waits in a busy loop on every issued admin virtqueue command to get a reply, which prevents callers from issuing multiple commands in parallel. To overcome this limitation, introduce a virtqueue event callback for the admin virtqueue and, for every issued command, use the completion mechanism to wait for its reply. In the event callback, the completion is triggered for every incoming reply. Alongside that, introduce a spin lock to protect the admin virtqueue operations.

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Message-Id: <20240716113552.80599-13-jiri@resnulli.us>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
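The pattern the patch adopts is the usual kernel completion-based request/response idiom: each command embeds a struct completion, the submitter queues it under a spin lock and sleeps in wait_for_completion(), and the virtqueue callback reaps finished buffers and wakes each waiter. Below is a minimal standalone sketch of that idiom; the demo_* names are illustrative only (the driver's actual code is in the diff further down), and the sketch passes GFP_ATOMIC because the buffer is added while the spin lock is held.

#include <linux/completion.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Illustrative only: one completion per in-flight command. */
struct demo_cmd {
	struct completion done;
	int ret;
};

/* Submitter: queue the command under the lock, then sleep until the
 * callback signals the embedded completion (no busy loop).
 */
static int demo_issue(struct virtqueue *vq, spinlock_t *lock,
		      struct scatterlist **sgs, unsigned int out_num,
		      unsigned int in_num, struct demo_cmd *cmd)
{
	unsigned long flags;
	int ret;

	/* A real driver would fill cmd->ret from the device's reply status. */
	cmd->ret = 0;
	init_completion(&cmd->done);

	spin_lock_irqsave(lock, flags);
	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(lock, flags);
		return ret;
	}
	if (!virtqueue_kick(vq)) {
		spin_unlock_irqrestore(lock, flags);
		return -EIO;
	}
	spin_unlock_irqrestore(lock, flags);

	wait_for_completion(&cmd->done);
	return cmd->ret;
}

/* Virtqueue callback: reap every finished buffer and wake its waiter.
 * The disable_cb/enable_cb loop closes the race with replies that
 * arrive while the callback is running.
 */
static void demo_done(struct virtqueue *vq, spinlock_t *lock)
{
	struct demo_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((cmd = virtqueue_get_buf(vq, &len)))
			complete(&cmd->done);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(lock, flags);
}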
Diffstat (limited to 'drivers/virtio/virtio_pci_modern.c')
-rw-r--r--	drivers/virtio/virtio_pci_modern.c	74
1 file changed, 63 insertions, 11 deletions
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 0fd344d1eaf9..608df3263df1 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
return index == vp_dev->admin_vq.vq_index;
}
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_admin_cmd *cmd;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ complete(&cmd->completion);
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
u16 opcode,
struct scatterlist **sgs,
@@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
- int ret, len;
+ unsigned long flags;
+ int ret;
vq = admin_vq->info->vq;
if (!vq)
@@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
- ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
- if (ret < 0)
- return -EIO;
+ init_completion(&cmd->completion);
- if (unlikely(!virtqueue_kick(vq)))
+again:
+ if (virtqueue_is_broken(vq))
return -EIO;
- while (!virtqueue_get_buf(vq, &len) &&
- !virtqueue_is_broken(vq))
- cpu_relax();
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ cpu_relax();
+ goto again;
+ }
+ goto unlock_err;
+ }
+ if (!virtqueue_kick(vq))
+ goto unlock_err;
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
- if (virtqueue_is_broken(vq))
- return -EIO;
+ wait_for_completion(&cmd->completion);
- return 0;
+ return cmd->ret;
+
+unlock_err:
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ return -EIO;
}
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
virtio_pci_admin_cmd_list_init(vdev);
}
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd *cmd;
+ struct virtqueue *vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
+ if (!vq)
+ return;
+
+ while ((cmd = virtqueue_detach_unused_buf(vq))) {
+ cmd->ret = -EIO;
+ complete(&cmd->completion);
+ }
+}
+
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -403,6 +452,8 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
+ vp_modern_avq_cleanup(vdev);
+
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -785,6 +836,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
+ spin_lock_init(&vp_dev->admin_vq.lock);
mutex_init(&vp_dev->admin_vq.cmd_lock);
return 0;
}