author    Jérôme Glisse <jglisse@redhat.com>	2019-01-28 14:50:56 -0500
committer Jérôme Glisse <jglisse@redhat.com>	2019-01-29 10:48:51 -0500
commit    a1dd88d530d101edd8a38adc8924ee222b73ae95 (patch)
tree      96d6d28057984d2bf21dc02fc402be253f27a5ae
parent    92a757b8b35a9eafc8bbb09709d0c020b0efb355 (diff)

NOUVEAU HACK FOR P2P AND IOMMU (branch: wip-hmm-p2p)

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>

-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c | 62
1 file changed, 55 insertions(+), 7 deletions(-)
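What the hack does: instead of handing the peer device raw BAR bus addresses, nouveau_dmem_p2p_map() now pushes each page's BAR physical address through the IOMMU with dma_map_resource(), and unwinds everything it already mapped when the range is invalidated or a mapping fails. As a standalone illustration of that map-then-rollback pattern, here is a minimal sketch. dma_map_resource(), dma_unmap_resource() and dma_mapping_error() are the real kernel DMA API; p2p_map_sketch() and bar_pa_of() are hypothetical stand-ins for the per-chunk lookup the patch performs.

#include <linux/dma-mapping.h>

/* Hypothetical helper standing in for nouveau_dmem_page_to_bar_pa(). */
static phys_addr_t bar_pa_of(unsigned long i);

static long p2p_map_sketch(struct device *dev, dma_addr_t *pas,
			   unsigned long npages, unsigned long page_size)
{
	unsigned long i, mapped;

	for (i = 0; i < npages; i++) {
		pas[i] = dma_map_resource(dev, bar_pa_of(i), page_size,
					  DMA_BIDIRECTIONAL, 0);
		if (dma_mapping_error(dev, pas[i]))
			goto unmap;
	}
	return i;

unmap:
	/* Unwind only the entries that were actually mapped. */
	mapped = i;
	for (i = 0; i < mapped; i++)
		dma_unmap_resource(dev, pas[i], page_size,
				   DMA_BIDIRECTIONAL, 0);
	return -EFAULT;
}

The real function below is hairier because it must skip holes (page == NULL) and pick a per-page DMA direction, which is why its rollback loop re-derives dir for every entry.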
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 33e75b9f1a94..776a57efa4ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -1071,20 +1071,27 @@ static long nouveau_dmem_p2p_map(struct hmm_devmem *devmem,
struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
const unsigned long page_size = 1UL << range->page_shift;
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- unsigned long i;
+ unsigned long i, mapped;
uint64_t *pfns;
+ long ret;
+#if 0
if (!device_test_p2p(device, devmem->device))
return 0;
+#endif
pfns = &range->pfns[(addr - range->start) >> range->page_shift];
for (i = 0; addr < range->end; i++, addr += page_size) {
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
struct nouveau_dmem_chunk *chunk;
struct page *page;
+ phys_addr_t pa;
/* Check if range is being invalidated */
- if (!range->valid)
- return 0;
+ if (!range->valid) {
+ ret = -EBUSY;
+ goto unmap;
+ }
page = hmm_pfn_to_page(range, pfns[i]);
if (page == NULL)
@@ -1097,10 +1104,50 @@ static long nouveau_dmem_p2p_map(struct hmm_devmem *devmem,
if (!chunk->p2p)
return i;
+ /* If it is read and write then map bi-directional. */
+ if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+ dir = DMA_BIDIRECTIONAL;
+
+#if 0
pas[i] = nouveau_dmem_page_to_bar_pa(chunk, drm, page);
+#else
+ pa = nouveau_dmem_page_to_bar_pa(chunk, drm, page);
+ pas[i] = dma_map_resource(device, pa, page_size, dir, 0);
+ if (dma_mapping_error(device, pas[i])) {
+ ret = -EFAULT;
+ goto unmap;
+ }
+#endif
}
return i;
+
+unmap:
+ mapped = i;
+ addr = range->start;
+ for (i = 0; i < mapped; i++, addr += page_size) {
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
+ struct page *page;
+
+ page = hmm_pfn_to_page(range, pfns[i]);
+ if (page == NULL)
+ continue;
+
+ /* Not a page we mapped above, skip it. */
+ if (!nouveau_dmem_page(drm, page))
+ continue;
+
+ /* Direction must match the one used when mapping. */
+ if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+ dir = DMA_BIDIRECTIONAL;
+
+ dma_unmap_resource(device, pas[i], page_size, dir, 0);
+ }
+
+ return ret;
}
static unsigned long nouveau_dmem_p2p_unmap(struct hmm_devmem *devmem,
@@ -1117,12 +1164,9 @@ static unsigned long nouveau_dmem_p2p_unmap(struct hmm_devmem *devmem,
pfns = &range->pfns[(addr - range->start) >> range->page_shift];
for (i = 0; addr < range->end; i++, addr += page_size) {
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
struct page *page;
- /* Check if range is being invalidated */
- if (!range->valid)
- return 0;
-
page = hmm_pfn_to_page(range, pfns[i]);
if (page == NULL)
continue;
@@ -1130,6 +1174,10 @@ static unsigned long nouveau_dmem_p2p_unmap(struct hmm_devmem *devmem,
if (!nouveau_dmem_page(drm, page))
return i;
- /* Nothing to do as we do not support IOMMU yet. */
+ /* Direction must match the one used when mapping. */
+ if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+ dir = DMA_BIDIRECTIONAL;
+ dma_unmap_resource(device, pas[i], page_size, dir, 0);
}
return i;
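A detail worth noting in the loops above: the DMA direction is chosen per page from the HMM write flag, and dma_unmap_resource() must be called with the same direction that dma_map_resource() used (CONFIG_DMA_API_DEBUG will flag a mismatch), which is why the unmap paths re-derive it rather than caching it. A minimal sketch of that selection, assuming the hmm_range layout this patch builds on; hmm_pfn_dir() is a hypothetical helper, not part of the patch.

/* Writable pages need DMA_BIDIRECTIONAL; read-only ones stay DMA_FROM_DEVICE. */
static enum dma_data_direction hmm_pfn_dir(const struct hmm_range *range,
					   uint64_t pfn)
{
	return (pfn & range->values[HMM_PFN_WRITE]) ?
		DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
}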