Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/nouveau/Kbuild         |   1 +
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 976 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_dmem.h |  39 ++
 drivers/gpu/drm/nouveau/nouveau_drm.c  |   5 +
 drivers/gpu/drm/nouveau/nouveau_drv.h  |   2 +
 5 files changed, 1023 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 026cdec27b23..a826a4df440d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -31,6 +31,7 @@ nouveau-y += nouveau_vga.o
nouveau-y += nouveau_bo.o
nouveau-y += nouveau_gem.o
nouveau-y += nouveau_svm.o
+nouveau-y += nouveau_dmem.o
nouveau-y += nouveau_mem.o
nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
new file mode 100644
index 000000000000..db7ca343b236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -0,0 +1,976 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_dmem.h"
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+#include "nouveau_bo.h"
+
+#include <nvif/class.h>
+#include <nvif/object.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
+
+#include <linux/sched/mm.h>
+#include <linux/hmm.h>
+
+/*
+ * FIXME: this is ugly. Right now we are using TTM to allocate VRAM and we
+ * pin it in VRAM while it is in use. We likely want to overhaul memory
+ * management for nouveau to be more page-like (not necessarily with system
+ * page size but a bigger page size) at the lowest level, and have some shim
+ * layer on top that would provide the same functionality as TTM.
+ */
+#define DMEM_CHUNK_SIZE (2UL << 20)
+#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
+
+struct nouveau_migrate;
+
+typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
+ u64 dst_addr, u64 src_addr);
+
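+/*
+ * A chunk is a 2MB block of VRAM backed by a pinned TTM buffer object that
+ * is mapped into the client GPU VMM at chunk->vma.addr.  Pages within a
+ * chunk are handed out by a simple bitmap allocator; callocated counts how
+ * many pages of the chunk are currently in use.
+ */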
+struct nouveau_dmem_chunk {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ struct nouveau_drm *drm;
+ unsigned long pfn_first;
+ unsigned long callocated;
+ unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
+ struct nvif_vma vma;
+};
+
+struct nouveau_dmem_migrate {
+ nouveau_migrate_copy_t copy_func;
+ struct nouveau_channel *chan;
+ struct nvif_object copy;
+};
+
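+/*
+ * Chunks are tracked on three lists.  Note that in this patch chunks are
+ * only ever placed on chunk_empty: backed chunks (bo != NULL) at the head,
+ * unbacked ones at the tail.  chunk_free and chunk_full are walked on
+ * suspend/resume and asserted empty at teardown, but not yet populated.
+ */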
+struct nouveau_dmem {
+ struct hmm_devmem *devmem;
+ struct nouveau_dmem_migrate migrate;
+ struct list_head chunk_free;
+ struct list_head chunk_full;
+ struct list_head chunk_empty;
+ struct mutex mutex;
+};
+
+struct nouveau_migrate_hmem {
+ struct scatterlist *sg;
+ struct nouveau_mem mem;
+ unsigned long npages;
+ struct nvif_vma vma;
+};
+
+struct nouveau_dmem_fault {
+ struct nouveau_drm *drm;
+ struct nouveau_fence *fence;
+ struct nouveau_migrate_hmem hmem;
+};
+
+struct nouveau_migrate {
+ struct vm_area_struct *vma;
+ struct nouveau_drm *drm;
+ struct nouveau_fence *fence;
+ unsigned long npages;
+ struct nouveau_migrate_hmem hmem;
+};
+
+static void
+nouveau_migrate_hmem_fini(struct nouveau_drm *drm,
+ struct nouveau_migrate_hmem *hmem)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+
+ nouveau_mem_fini(&hmem->mem);
+ nvif_vmm_put(vmm, &hmem->vma);
+
+ if (hmem->sg) {
+ dma_unmap_sg_attrs(drm->dev->dev, hmem->sg,
+ hmem->npages, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ kfree(hmem->sg);
+ hmem->sg = NULL;
+ }
+}
+
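+/*
+ * Build a scatterlist over the pages referenced by the migrate PFN array,
+ * DMA-map it, and map the resulting memory object into the client GPU VMM
+ * so the copy engine can address the system pages at hmem->vma.addr.
+ */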
+static int
+nouveau_migrate_hmem_init(struct nouveau_drm *drm,
+ struct nouveau_migrate_hmem *hmem,
+ unsigned long npages,
+ const unsigned long *pfns)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+ unsigned long i;
+ int ret;
+
+	if (npages == 0)
+		return -EINVAL;
+
+	hmem->sg = kcalloc(npages, sizeof(*hmem->sg), GFP_KERNEL);
+	if (hmem->sg == NULL)
+		return -ENOMEM;
+	/* Initialize entries so sg_set_page() is valid with CONFIG_DEBUG_SG. */
+	sg_init_table(hmem->sg, npages);
+
+ for (i = 0, hmem->npages = 0; hmem->npages < npages; ++i) {
+ struct page *page;
+
+ if (!pfns[i] || pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ page = migrate_pfn_to_page(pfns[i]);
+ if (page == NULL) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ sg_set_page(&hmem->sg[hmem->npages], page, PAGE_SIZE, 0);
+ hmem->npages++;
+ }
+ sg_mark_end(&hmem->sg[hmem->npages - 1]);
+
+ i = dma_map_sg_attrs(drm->dev->dev, hmem->sg, hmem->npages,
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+ if (i != hmem->npages) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = nouveau_mem_sgl(&hmem->mem, &drm->client,
+ hmem->npages, hmem->sg);
+ if (ret)
+ goto error;
+
+ ret = nvif_vmm_get(vmm, LAZY, false, hmem->mem.mem.page,
+ 0, hmem->mem.mem.size, &hmem->vma);
+ if (ret)
+ goto error;
+
+ ret = nouveau_mem_map(&hmem->mem, vmm, &hmem->vma);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ nouveau_migrate_hmem_fini(drm, hmem);
+ return ret;
+}
+
+
+static void
+nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk;
+ struct nouveau_drm *drm;
+ unsigned long idx;
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+ idx = page_to_pfn(page) - chunk->pfn_first;
+ drm = chunk->drm;
+
+ /*
+ * FIXME:
+ *
+	 * This is really a bad example; we need to overhaul nouveau memory
+	 * management to be more page focused and to allow a lighter locking
+	 * scheme to be used in the process.
+ */
+ mutex_lock(&drm->dmem->mutex);
+ clear_bit(idx, chunk->bitmap);
+ WARN_ON(!chunk->callocated);
+ chunk->callocated--;
+ mutex_unlock(&drm->dmem->mutex);
+}
+
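+/*
+ * alloc_and_copy() callback for CPU faults on device memory: allocate a
+ * system page for every faulting device page, then use the GPU copy engine
+ * to copy the data back out of VRAM.  A fence is emitted so that
+ * finalize_and_map() can wait for the copies to complete.
+ */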
+static void
+nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_dmem_fault *fault = private;
+ struct nouveau_drm *drm = fault->drm;
+ unsigned long addr, i, c, npages = 0;
+ nouveau_migrate_copy_t copy;
+ int ret;
+
+ /* First allocate new memory */
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct page *dpage, *spage;
+
+ dst_pfns[i] = 0;
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = hmm_vma_alloc_locked_page(vma, addr);
+ if (!dpage) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ continue;
+ }
+
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
+ MIGRATE_PFN_LOCKED;
+ npages++;
+ }
+
+	/* Create the scatterlist. FIXME: get rid of the scatterlist. */
+ ret = nouveau_migrate_hmem_init(drm, &fault->hmem, npages, dst_pfns);
+ if (ret)
+ goto error;
+
+ /* Copy things over */
+ copy = drm->dmem->migrate.copy_func;
+ for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *spage, *dpage;
+ u64 src_addr, dst_addr;
+
+ dpage = migrate_pfn_to_page(dst_pfns[i]);
+ if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ dst_addr = fault->hmem.vma.addr + (c << PAGE_SHIFT);
+ c++;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ __free_page(dpage);
+ continue;
+ }
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(spage);
+ src_addr = page_to_pfn(spage) - chunk->pfn_first;
+ src_addr = (src_addr << PAGE_SHIFT) + chunk->vma.addr;
+
+ ret = copy(drm, 1, dst_addr, src_addr);
+ if (ret) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ __free_page(dpage);
+ continue;
+ }
+ }
+
+ nouveau_fence_new(drm->dmem->migrate.chan, false, &fault->fence);
+
+ return;
+
+error:
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
+ struct page *page;
+
+ if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ page = migrate_pfn_to_page(dst_pfns[i]);
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ if (page == NULL)
+ continue;
+
+ __free_page(page);
+ }
+}
+
+static void
+nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
+				    const unsigned long *src_pfns,
+				    const unsigned long *dst_pfns,
+				    unsigned long start,
+				    unsigned long end,
+				    void *private)
+{
+ struct nouveau_dmem_fault *fault = private;
+ struct nouveau_drm *drm = fault->drm;
+
+ if (fault->fence) {
+ nouveau_fence_wait(fault->fence, true, false);
+ nouveau_fence_unref(&fault->fence);
+ } else {
+ /*
+		 * FIXME wait for the channel to be IDLE before finalizing
+ * the hmem object below (nouveau_migrate_hmem_fini()).
+ */
+ }
+ nouveau_migrate_hmem_fini(drm, &fault->hmem);
+}
+
+static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
+ .alloc_and_copy = nouveau_dmem_fault_alloc_and_copy,
+ .finalize_and_map = nouveau_dmem_fault_finalize_and_map,
+};
+
+static int
+nouveau_dmem_fault(struct hmm_devmem *devmem,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp)
+{
+ unsigned long src[1] = {0}, dst[1] = {0};
+ struct nouveau_dmem_fault fault = {0};
+ int ret;
+
+ /*
+ * FIXME what we really want is to find some heuristic to migrate more
+	 * than just one page on CPU fault. When such a fault happens it is
+	 * very likely that more surrounding pages will CPU fault too.
+ */
+ fault.drm = devmem->private;
+ ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
+ addr + PAGE_SIZE, src, dst, &fault);
+ if (ret)
+ return VM_FAULT_SIGBUS;
+
+ if (dst[0] == MIGRATE_PFN_ERROR)
+ return VM_FAULT_SIGBUS;
+
+ return 0;
+}
+
+static const struct hmm_devmem_ops
+nouveau_dmem_devmem_ops = {
+ .free = nouveau_dmem_free,
+ .fault = nouveau_dmem_fault,
+};
+
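+/*
+ * Take an unbacked chunk off chunk_empty, back it with a pinned 2MB VRAM
+ * buffer object and map it into the client GPU VMM (with a 4KiB GPU page
+ * shift of 12).  Successfully backed chunks go back to the head of
+ * chunk_empty so the allocator finds them first; unbacked ones go back to
+ * the tail.
+ */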
+static int
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return -EINVAL;
+
+ mutex_lock(&drm->dmem->mutex);
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ return -ENOMEM;
+ }
+
+ list_del(&chunk->list);
+ mutex_unlock(&drm->dmem->mutex);
+
+ ret = nvif_vmm_get(vmm, LAZY, false, 12, 0,
+ DMEM_CHUNK_SIZE, &chunk->vma);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+ &chunk->bo);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ if (ret) {
+ nouveau_bo_ref(NULL, &chunk->bo);
+ goto out;
+ }
+
+ ret = nouveau_mem_map(nouveau_mem(&chunk->bo->bo.mem), vmm, &chunk->vma);
+ if (ret) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
+ goto out;
+ }
+
+ bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
+
+out:
+ mutex_lock(&drm->dmem->mutex);
+ if (chunk->bo)
+ list_add(&chunk->list, &drm->dmem->chunk_empty);
+ else
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ mutex_unlock(&drm->dmem->mutex);
+
+ return ret;
+}
+
+static struct nouveau_dmem_chunk *
+nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk)
+ return chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+	if (chunk && chunk->bo)
+ return chunk;
+
+ return NULL;
+}
+
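+/*
+ * Allocate npages device page frame numbers from the chunk bitmaps, growing
+ * the pool one chunk at a time when no free pages are left.  Slots that
+ * could not be filled keep the -1UL poison written by the memset at the top
+ * of the function.
+ */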
+static int
+nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages)
+{
+ struct nouveau_dmem_chunk *chunk;
+ unsigned long c;
+ int ret;
+
+ memset(pages, 0xff, npages * sizeof(*pages));
+
+ mutex_lock(&drm->dmem->mutex);
+ for (c = 0; c < npages;) {
+ unsigned long i;
+
+ chunk = nouveau_dmem_chunk_first_free_locked(drm);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ ret = nouveau_dmem_chunk_alloc(drm);
+			if (ret) {
+				if (c)
+					goto out;
+				return ret;
+			}
+			/* Re-take the lock dropped for nouveau_dmem_chunk_alloc(). */
+			mutex_lock(&drm->dmem->mutex);
+			continue;
+ }
+
+ i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
+ while (i < DMEM_CHUNK_NPAGES && c < npages) {
+ pages[c] = chunk->pfn_first + i;
+ set_bit(i, chunk->bitmap);
+ chunk->callocated++;
+ c++;
+
+ i = find_next_zero_bit(chunk->bitmap,
+ DMEM_CHUNK_NPAGES, i);
+ }
+ }
+	mutex_unlock(&drm->dmem->mutex);
+
+out:
+	return 0;
+}
+
+static struct page *
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+{
+ unsigned long pfns[1];
+ struct page *page;
+ int ret;
+
+	/* FIXME stop all the mismatched API ... */
+ ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
+ if (ret)
+ return NULL;
+
+ page = pfn_to_page(pfns[0]);
+ get_page(page);
+ lock_page(page);
+ return page;
+}
+
+static void
+nouveau_dmem_pages_free(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages)
+{
+ unsigned long i;
+
+ mutex_lock(&drm->dmem->mutex);
+	for (i = 0; i < npages; ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ unsigned long idx;
+ struct page *page;
+
+ if (pages[i] == -1UL)
+ continue;
+
+ page = pfn_to_page(pages[i]);
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+
+ idx = pages[i] - chunk->pfn_first;
+ clear_bit(idx, chunk->bitmap);
+ WARN_ON(!chunk->callocated);
+ chunk->callocated--;
+ pages[i] = -1UL;
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+static void
+nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+void
+nouveau_dmem_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+void
+nouveau_dmem_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+void
+nouveau_dmem_fini(struct nouveau_drm *drm)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+ struct nouveau_dmem_chunk *chunk, *tmp;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+
+ WARN_ON(!list_empty(&drm->dmem->chunk_free));
+ WARN_ON(!list_empty(&drm->dmem->chunk_full));
+
+ list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
+ if (chunk->bo) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
+ }
+ nvif_vmm_put(vmm, &chunk->vma);
+ list_del(&chunk->list);
+ kfree(chunk);
+ }
+
+ mutex_unlock(&drm->dmem->mutex);
+
+ hmm_devmem_remove(drm->dmem->devmem);
+ kfree(drm->dmem);
+}
+
+static int
+nvc0_migrate_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ }
+ return ret;
+}
+
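+/*
+ * Fermi copy engine: program source and destination GPU virtual addresses,
+ * PAGE_SIZE pitches and line length, then kick up to 8191 lines (pages) per
+ * LAUNCH_DMA (method 0x0300).
+ */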
+static int
+nvc0_migrate_copy(struct nouveau_drm *drm, u64 npages,
+ u64 dst_addr, u64 src_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ int ret;
+
+ while (npages) {
+ u64 line_count = (npages > 8191) ? 8191 : npages;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_addr));
+ OUT_RING (chan, lower_32_bits(src_addr));
+ OUT_RING (chan, upper_32_bits(dst_addr));
+ OUT_RING (chan, lower_32_bits(dst_addr));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ npages -= line_count;
+ src_addr += (line_count << PAGE_SHIFT);
+ dst_addr += (line_count << PAGE_SHIFT);
+ }
+
+ return 0;
+}
+
+static int
+nve0_migrate_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle & 0x0000ffff);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
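+/*
+ * Kepler and newer copy engines take the full page count in a single
+ * LAUNCH_DMA, so no 8191-line batching is needed here.
+ */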
+static int
+nve0_migrate_copy(struct nouveau_drm *drm, u64 npages,
+ u64 dst_addr, u64 src_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ int ret;
+
+ ret = RING_SPACE(chan, 10);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+ OUT_RING (chan, upper_32_bits(src_addr));
+ OUT_RING (chan, lower_32_bits(src_addr));
+ OUT_RING (chan, upper_32_bits(dst_addr));
+ OUT_RING (chan, lower_32_bits(dst_addr));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, npages);
+ BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+ return 0;
+}
+
+static int
+nouveau_dmem_migrate_init(struct nouveau_drm *drm)
+{
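+	/*
+	 * Copy engine classes to try, newest first.  The dedicated copy
+	 * engines ("COPY", engines 4/5 on drm->cechan) are preferred over
+	 * the graphics copy engine ("GRCE", engine 0 on drm->channel),
+	 * presumably so migrations do not serialize behind graphics work.
+	 */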
+ static const struct {
+ const char *name;
+ int engine;
+ s32 oclass;
+ nouveau_migrate_copy_t copy_func;
+ int (*init)(struct nouveau_channel *, u32 handle);
+ } _methods[] = {
+ { "COPY", 4, 0xc3b5, nve0_migrate_copy, nve0_migrate_init },
+ { "GRCE", 0, 0xc3b5, nve0_migrate_copy, nvc0_migrate_init },
+ { "COPY", 4, 0xc1b5, nve0_migrate_copy, nve0_migrate_init },
+ { "GRCE", 0, 0xc1b5, nve0_migrate_copy, nvc0_migrate_init },
+ { "COPY", 4, 0xc0b5, nve0_migrate_copy, nve0_migrate_init },
+ { "GRCE", 0, 0xc0b5, nve0_migrate_copy, nvc0_migrate_init },
+ { "COPY", 4, 0xb0b5, nve0_migrate_copy, nve0_migrate_init },
+ { "GRCE", 0, 0xb0b5, nve0_migrate_copy, nvc0_migrate_init },
+ { "COPY", 4, 0xa0b5, nve0_migrate_copy, nve0_migrate_init },
+ { "GRCE", 0, 0xa0b5, nve0_migrate_copy, nvc0_migrate_init },
+ { "COPY1", 5, 0x90b8, nvc0_migrate_copy, nvc0_migrate_init },
+ { "COPY0", 4, 0x90b5, nvc0_migrate_copy, nvc0_migrate_init },
+ {},
+ }, *mthd = _methods;
+ int ret;
+
+ do {
+ struct nouveau_channel *chan;
+
+ if (mthd->engine)
+ chan = drm->cechan;
+ else
+ chan = drm->channel;
+ if (chan == NULL)
+ continue;
+
+ ret = nvif_object_init(&chan->user,
+ mthd->oclass | (mthd->engine << 16),
+ mthd->oclass, NULL, 0,
+ &drm->dmem->migrate.copy);
+ if (ret == 0) {
+ ret = mthd->init(chan, drm->dmem->migrate.copy.handle);
+ if (ret) {
+ nvif_object_fini(&drm->dmem->migrate.copy);
+ continue;
+ }
+
+ drm->dmem->migrate.copy_func = mthd->copy_func;
+ drm->dmem->migrate.chan = chan;
+ return 0;
+ }
+ } while ((++mthd)->copy_func);
+
+ return -ENODEV;
+}
+
+void
+nouveau_dmem_init(struct nouveau_drm *drm)
+{
+ struct device *device = drm->dev->dev;
+ unsigned long i, size;
+ int ret;
+
+	/* This only makes sense on Pascal or newer */
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
+ return;
+
+ if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
+ return;
+
+ mutex_init(&drm->dmem->mutex);
+ INIT_LIST_HEAD(&drm->dmem->chunk_free);
+ INIT_LIST_HEAD(&drm->dmem->chunk_full);
+ INIT_LIST_HEAD(&drm->dmem->chunk_empty);
+
+ size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+
+	/* Initialize migration DMA helpers before registering memory */
+ ret = nouveau_dmem_migrate_init(drm);
+ if (ret) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
+ return;
+ }
+
+ /*
+ * FIXME we need some kind of policy to decide how much VRAM we
+ * want to register with HMM. For now just register everything
+	 * and later, if we want to do things like overcommit, we can
+	 * revisit this.
+ */
+ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device,
+ size, drm);
+ if (drm->dmem->devmem == NULL) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
+ return;
+ }
+
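+	/*
+	 * Carve the registered range into DMEM_CHUNK_SIZE chunks and tag each
+	 * struct page with its owning chunk, so the free and fault callbacks
+	 * can get back to it via hmm_devmem_page_get_drvdata().
+	 */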
+ for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page;
+ unsigned long j;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (chunk == NULL) {
+ nouveau_dmem_fini(drm);
+ return;
+ }
+
+ chunk->drm = drm;
+ chunk->pfn_first = drm->dmem->devmem->pfn_first;
+ chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+
+ page = pfn_to_page(chunk->pfn_first);
+ for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page) {
+ hmm_devmem_page_set_drvdata(page, (long)chunk);
+ }
+ }
+
+	NV_INFO(drm, "DMEM: registered %luMB of device memory\n", size >> 20);
+}
+
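+/*
+ * alloc_and_copy() callback for migration to device memory: allocate a
+ * device page for every migrating system page, DMA-map the source pages
+ * and copy them into VRAM with the GPU copy engine, fencing the channel
+ * for finalize_and_map() to wait on.
+ */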
+static void
+nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_migrate *migrate = private;
+ struct nouveau_drm *drm = migrate->drm;
+ unsigned long addr, i, c, npages = 0;
+ nouveau_migrate_copy_t copy;
+ int ret;
+
+ /* First allocate new memory */
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct page *dpage, *spage;
+
+ dst_pfns[i] = 0;
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = nouveau_dmem_page_alloc_locked(drm);
+ if (!dpage)
+ continue;
+
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
+ MIGRATE_PFN_LOCKED |
+ MIGRATE_PFN_DEVICE;
+ npages++;
+ }
+
+	/* Create the scatterlist. FIXME: get rid of the scatterlist. */
+ ret = nouveau_migrate_hmem_init(drm, &migrate->hmem, npages, src_pfns);
+ if (ret)
+ goto error;
+
+ /* Copy things over */
+ copy = drm->dmem->migrate.copy_func;
+ for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *spage, *dpage;
+ u64 src_addr, dst_addr;
+
+ dpage = migrate_pfn_to_page(dst_pfns[i]);
+ if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
+ dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
+ dst_addr = (dst_addr << PAGE_SHIFT) + chunk->vma.addr;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
+ nouveau_dmem_page_free_locked(drm, dpage);
+ dst_pfns[i] = 0;
+ continue;
+ }
+
+ src_addr = migrate->hmem.vma.addr + (c << PAGE_SHIFT);
+ c++;
+
+ ret = copy(drm, 1, dst_addr, src_addr);
+ if (ret) {
+ nouveau_dmem_page_free_locked(drm, dpage);
+ dst_pfns[i] = 0;
+ continue;
+ }
+ }
+
+ nouveau_fence_new(drm->dmem->migrate.chan, false, &migrate->fence);
+
+ return;
+
+error:
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
+ struct page *page;
+
+ if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ page = migrate_pfn_to_page(dst_pfns[i]);
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ if (page == NULL)
+ continue;
+
+ __free_page(page);
+ }
+}
+
+static void
+nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
+				      const unsigned long *src_pfns,
+				      const unsigned long *dst_pfns,
+				      unsigned long start,
+				      unsigned long end,
+				      void *private)
+{
+ struct nouveau_migrate *migrate = private;
+ struct nouveau_drm *drm = migrate->drm;
+
+ if (migrate->fence) {
+ nouveau_fence_wait(migrate->fence, true, false);
+ nouveau_fence_unref(&migrate->fence);
+ } else {
+ /*
+		 * FIXME wait for the channel to be IDLE before finalizing
+ * the hmem object below (nouveau_migrate_hmem_fini()).
+ */
+ }
+ nouveau_migrate_hmem_fini(drm, &migrate->hmem);
+
+ /* FIXME update GPU page table to point to newly migrated memory */
+}
+
+static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
+ .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
+ .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
+};
+
+int
+nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long *src_pfns, *dst_pfns, npages;
+ struct nouveau_migrate migrate = {0};
+ unsigned long i, c, max;
+ int ret = 0;
+
+ npages = (end - start) >> PAGE_SHIFT;
+ max = min(SG_MAX_SINGLE_ALLOC, npages);
+	src_pfns = kcalloc(max, sizeof(*src_pfns), GFP_KERNEL);
+ if (src_pfns == NULL)
+ return -ENOMEM;
+	dst_pfns = kcalloc(max, sizeof(*dst_pfns), GFP_KERNEL);
+ if (dst_pfns == NULL) {
+ kfree(src_pfns);
+ return -ENOMEM;
+ }
+
+ migrate.drm = drm;
+ migrate.vma = vma;
+ migrate.npages = npages;
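+	/* Migrate in batches that fit a single scatterlist allocation. */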
+ for (i = 0; i < npages; i += c) {
+ unsigned long next;
+
+		c = min(SG_MAX_SINGLE_ALLOC, npages - i);
+ next = start + (c << PAGE_SHIFT);
+ ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
+ next, src_pfns, dst_pfns, &migrate);
+ if (ret)
+ goto out;
+ start = next;
+ }
+
+out:
+ kfree(dst_pfns);
+ kfree(src_pfns);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
new file mode 100644
index 000000000000..afe0b5549818
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NOUVEAU_DMEM_H__
+#define __NOUVEAU_DMEM_H__
+#include <nvif/os.h>
+struct drm_device;
+struct drm_file;
+struct nouveau_drm;
+
+void nouveau_dmem_init(struct nouveau_drm *);
+void nouveau_dmem_fini(struct nouveau_drm *);
+void nouveau_dmem_suspend(struct nouveau_drm *);
+void nouveau_dmem_resume(struct nouveau_drm *);
+
+int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d871f6a5d82e..b5e68914c406 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -64,6 +64,7 @@
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
+#include "nouveau_dmem.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -577,6 +578,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_svm_init(drm);
nouveau_accel_init(drm);
+ nouveau_dmem_init(drm);
nouveau_fbcon_init(dev);
nouveau_led_init(dev);
@@ -619,6 +621,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
+ nouveau_dmem_fini(drm);
nouveau_accel_fini(drm);
nouveau_svm_fini(drm);
nouveau_hwmon_fini(dev);
@@ -668,6 +671,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
int ret;
nouveau_svm_suspend(drm);
+ nouveau_dmem_suspend(drm);
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
@@ -744,6 +748,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
}
nouveau_led_resume(dev);
+ nouveau_dmem_resume(drm);
nouveau_svm_resume(drm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eb638c52851e..a00e79fe2f58 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -217,6 +217,8 @@ struct nouveau_drm {
struct dev_pm_domain vga_pm_domain;
struct nouveau_svm *svm;
+
+ struct nouveau_dmem *dmem;
};
static inline struct nouveau_drm *