| author | Dragos Tatulea <dtatulea@nvidia.com> | 2024-06-26 13:26:57 +0300 |
|---|---|---|
| committer | Michael S. Tsirkin <mst@redhat.com> | 2024-07-09 08:42:49 -0400 |
| commit | ffb1aae43ed5078598318042bac1ae1057e57e2d (patch) | |
| tree | c200f2c2412ce5436b77875f1e88b45e2b57464d /drivers/vdpa | |
| parent | 3b3adb3bbfce275400b21f7a0d54db0ba0a46cc9 (diff) | |
vdpa/mlx5: Pre-create hardware VQs at vdpa .dev_add time
Currently, hardware VQs are created right when the vdpa device enters the
DRIVER_OK state. That is easier because most of the VQ state is known by
then.
This patch switches to creating all VQs and their associated resources
at device creation time. The motivation is to reduce the vdpa device
live migration downtime by moving the expensive operation of creating
all the hardware VQs and their associated resources out of downtime on
the destination VM.
The VQs are now created in a blank state. Their configuration happens
later, on DRIVER_OK, and is applied when the VQs are moved to the Ready
state.
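
The resulting flow, sketched below from the hunks in the diff at the
bottom of this page: .dev_add pre-creates the VQs blank, and DRIVER_OK
either resumes them or falls back to a full setup. The meaning of
setup_vq_resources()'s boolean argument (blank vs. fully configured VQs)
is inferred from its call sites here, not stated in the patch itself.

	/* Sketch only; error handling and surrounding context abbreviated. */

	/* 1) At vdpa .dev_add time: pre-create hardware VQs in a blank state. */
	down_write(&ndev->reslock);
	err = setup_vq_resources(ndev, false);	/* false: blank, unconfigured VQs */
	up_write(&ndev->reslock);

	/* 2) On DRIVER_OK: pre-created VQs get their configuration applied and
	 * are resumed; if they were not pre-created, do a full setup instead.
	 */
	if (ndev->setup)
		err = resume_vqs(ndev);			/* configure + move to Ready */
	else
		err = setup_vq_resources(ndev, true);	/* true: fully configured VQs */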
When .set_vq_ready() is called on a VQ before DRIVER_OK, special care is
needed: because the VQ is already created, a resume_vq() would be
triggered too early, while no mr has been configured yet. Skip calling
resume_vq() in this case and let it be handled during DRIVER_OK.
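
The guard in mlx5_vdpa_set_vq_ready(), taken from the first hunk of the
diff below, only attempts a resume once the driver has set DRIVER_OK:

	if (!ready) {
		suspend_vq(ndev, mvq);
	} else if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Before DRIVER_OK the VQ exists but has no mr configured,
		 * so resuming here would be premature; DRIVER_OK handles it.
		 */
		if (resume_vq(ndev, mvq))
			ready = false;
	}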
For virtio-vdpa, the device configuration is done earlier during
.vdpa_dev_add() by vdpa_register_device(). Avoid calling
setup_vq_resources() a second time in that case.
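
Concretely, mlx5_vdpa_dev_add() (see the corresponding hunk in the diff
below) returns early when vdpa_register_device() has already done the
setup, and otherwise pre-creates the blank VQs under the reslock:

	mgtdev->ndev = ndev;

	/* For virtio-vdpa, the device was set up during device register. */
	if (ndev->setup)
		return 0;

	down_write(&ndev->reslock);
	err = setup_vq_resources(ndev, false);
	up_write(&ndev->reslock);
	if (err)
		goto err_setup_vq_res;

	return 0;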
On a 64-CPU, 256 GB VM with one vDPA device of 16 VQ pairs, the full VQ
resource creation + resume time was ~370 ms. Now it is down to 60 ms
(only VQ config and resume). The measurements were done on a
ConnectX6DX-based vDPA device.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Message-Id: <20240626-stage-vdpa-vq-precreate-v2-21-560c491078df@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vdpa')
| -rw-r--r-- | drivers/vdpa/mlx5/net/mlx5_vnet.c | 37 |
1 file changed, 32 insertions(+), 5 deletions(-)
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 324604b16b91..8ef703f4c23d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2444,7 +2444,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
 	mvq = &ndev->vqs[idx];
 	if (!ready) {
 		suspend_vq(ndev, mvq);
-	} else {
+	} else if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) {
 		if (resume_vq(ndev, mvq))
 			ready = false;
 	}
@@ -3078,10 +3078,18 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 			goto err_setup;
 		}
 		register_link_notifier(ndev);
-		err = setup_vq_resources(ndev, true);
-		if (err) {
-			mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
-			goto err_driver;
+		if (ndev->setup) {
+			err = resume_vqs(ndev);
+			if (err) {
+				mlx5_vdpa_warn(mvdev, "failed to resume VQs\n");
+				goto err_driver;
+			}
+		} else {
+			err = setup_vq_resources(ndev, true);
+			if (err) {
+				mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
+				goto err_driver;
+			}
 		}
 	} else {
 		mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
@@ -3142,6 +3150,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
 		if (mlx5_vdpa_create_dma_mr(mvdev))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
+	setup_vq_resources(ndev, false);
 	up_write(&ndev->reslock);
 
 	return 0;
@@ -3835,8 +3844,21 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		goto err_reg;
 
 	mgtdev->ndev = ndev;
+
+	/* For virtio-vdpa, the device was set up during device register. */
+	if (ndev->setup)
+		return 0;
+
+	down_write(&ndev->reslock);
+	err = setup_vq_resources(ndev, false);
+	up_write(&ndev->reslock);
+	if (err)
+		goto err_setup_vq_res;
+
 	return 0;
 
+err_setup_vq_res:
+	_vdpa_unregister_device(&mvdev->vdev);
 err_reg:
 	destroy_workqueue(mvdev->wq);
 err_res2:
@@ -3862,6 +3884,11 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	unregister_link_notifier(ndev);
 	_vdpa_unregister_device(dev);
+
+	down_write(&ndev->reslock);
+	teardown_vq_resources(ndev);
+	up_write(&ndev->reslock);
+
 	wq = mvdev->wq;
 	mvdev->wq = NULL;
 	destroy_workqueue(wq);