author	Xuan Zhuo <xuanzhuo@linux.alibaba.com>	2023-08-10 20:30:56 +0800
committer	Michael S. Tsirkin <mst@redhat.com>	2023-09-03 18:10:23 -0400
commit	8bd2f71054bd0bc997833e9825143672eb7e2801 (patch)
tree	d311672bfc9f0faad96a12cbf835a874dadefadd /drivers/virtio
parent	b6253b4e21939f1bb54e8fdb84c23af9c3fb834a (diff)
virtio_ring: introduce dma sync api for virtqueue
These APIs have been introduced:

 * virtqueue_dma_need_sync
 * virtqueue_dma_sync_single_range_for_cpu
 * virtqueue_dma_sync_single_range_for_device

These APIs can be used together with the premapped mechanism to sync the DMA address.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
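As a rough illustration (not part of this patch), a driver could pair these calls with a buffer premapped via virtqueue_dma_map_single_attrs() and checked with virtqueue_dma_mapping_error(), both added earlier in this series; the helper name example_premap_rx_buf() below is hypothetical:

#include <linux/virtio.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver-side sketch: premap a receive buffer once and make it
 * visible to the device, syncing only when the platform requires it.
 */
static int example_premap_rx_buf(struct virtqueue *vq, void *buf, size_t len,
				 dma_addr_t *addr)
{
	*addr = virtqueue_dma_map_single_attrs(vq, buf, len,
					       DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, *addr))
		return -ENOMEM;

	/* Skip the sync entirely on platforms that do not need it. */
	if (virtqueue_dma_need_sync(vq, *addr))
		virtqueue_dma_sync_single_range_for_device(vq, *addr, 0, len,
							   DMA_FROM_DEVICE);

	/* ... add *addr to the virtqueue as a premapped buffer ... */
	return 0;
}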
Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/virtio_ring.c	76
1 file changed, 76 insertions, 0 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 916479c9c72c..81ecb29c88f1 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -3175,4 +3175,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
}
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
+/**
+ * virtqueue_dma_need_sync - check whether a DMA address needs to be synced
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
+ * to be synchronized.
+ *
+ * Return: true if the DMA address needs to be synchronized, false otherwise.
+ */
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return false;
+
+ return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
+
+/**
+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ *
+ */
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_cpu(dev, addr, offset, size,
+ dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_device(dev, addr, offset, size,
+ dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+
MODULE_LICENSE("GPL");