author    Jérôme Glisse <jglisse@redhat.com>  2018-07-26 17:59:13 -0400
committer Jérôme Glisse <jglisse@redhat.com>  2018-08-16 18:09:53 -0400
commit    6cfdadc5eecdf4524ed1fac3f5b727ba45db9b59
tree      98c5175d6b4ac7f68254fe7d16f8e318b02bf478
parent    d428e307248f9a42b5a383a6453e9f9d214580f1
gpu/nouveau: dmem (nouveau-hmm-v01)
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
 drivers/gpu/drm/nouveau/Kbuild         |   1 +
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 518 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_dmem.h |  41 +++
 drivers/gpu/drm/nouveau/nouveau_drm.c  |   5 +
 drivers/gpu/drm/nouveau/nouveau_drv.h  |   2 +
 5 files changed, 567 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 026cdec27b23..a826a4df440d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -31,6 +31,7 @@ nouveau-y += nouveau_vga.o
nouveau-y += nouveau_bo.o
nouveau-y += nouveau_gem.o
nouveau-y += nouveau_svm.o
+nouveau-y += nouveau_dmem.o
nouveau-y += nouveau_mem.o
nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
new file mode 100644
index 000000000000..877ca5d994d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_dmem.h"
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "nouveau_bo.h"
+
+#include <nvif/class.h>
+#include <nvif/object.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
+
+#include <linux/sched/mm.h>
+#include <linux/hmm.h>
+#include <linux/migrate.h>
+
+/*
+ * FIXME: this is ugly. Right now we use TTM to allocate VRAM and we pin it
+ * while it is in use. We likely want to overhaul nouveau memory management
+ * to be more page-like at the lowest level (not necessarily with the system
+ * page size, but a bigger page size) and have some shim layer on top that
+ * provides the same functionality as TTM.
+ */
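+/* VRAM is handed out to HMM in fixed-size chunks of DMEM_CHUNK_NPAGES pages. */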
+#define DMEM_CHUNK_SIZE (2UL << 20)
+#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
+
+typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pfns_src,
+ unsigned long *pfns_dst);
+
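+/*
+ * A chunk covers DMEM_CHUNK_SIZE of device memory, backed by a pinned VRAM
+ * buffer object mapped at vma. pfn_first is the device pfn of the chunk's
+ * first page, bitmap tracks which pages have been handed out and callocated
+ * is the number of pages currently allocated from the chunk.
+ */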
+struct nouveau_dmem_chunk {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ struct nouveau_drm *drm;
+ unsigned long pfn_first;
+ unsigned long callocated;
+ unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
+ struct nvif_vma vma;
+};
+
+struct nouveau_dmem_migrate {
+	struct nouveau_channel *chan;
+	nouveau_migrate_copy_t copy_func;
+	struct nvif_object copy;
+};
+
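+/*
+ * Per-device state: the HMM device memory object, the copy channel used for
+ * migration and the chunk lists. chunk_empty holds chunks with no pages
+ * allocated (possibly still lacking a backing bo), chunk_free holds chunks
+ * with free pages left and chunk_full holds fully allocated chunks. The
+ * mutex protects the lists and the per-chunk bitmaps.
+ */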
+struct nouveau_dmem {
+ struct hmm_devmem *devmem;
+ struct nouveau_dmem_migrate migrate;
+ struct list_head chunk_free;
+ struct list_head chunk_full;
+ struct list_head chunk_empty;
+ struct mutex mutex;
+};
+
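+/*
+ * HMM free() callback: called when the last reference to a device page is
+ * dropped. Return the page to its chunk by clearing its bit in the bitmap.
+ */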
+static void
+nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk;
+ struct nouveau_drm *drm;
+ unsigned long idx;
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+ idx = page_to_pfn(page) - chunk->pfn_first;
+ drm = chunk->drm;
+
+	/*
+	 * FIXME:
+	 *
+	 * This is really a bad example; we need to overhaul nouveau memory
+	 * management to be more page focused and to allow a lighter locking
+	 * scheme to be used in the process.
+	 */
+ mutex_lock(&drm->dmem->mutex);
+ clear_bit(idx, chunk->bitmap);
+ WARN_ON(!chunk->callocated);
+ chunk->callocated--;
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+static void
+nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
+				  const unsigned long *src_pfns,
+				  unsigned long *dst_pfns,
+				  unsigned long start,
+				  unsigned long end,
+				  void *private)
+{
+	unsigned long addr, c;
+
+	/* Allocate new system memory pages and copy the device pages over. */
+	for (addr = start, c = 0; addr < end; addr += PAGE_SIZE, c++) {
+		struct page *dpage, *spage;
+
+		dst_pfns[c] = 0;
+		spage = migrate_pfn_to_page(src_pfns[c]);
+		if (!spage || !(src_pfns[c] & MIGRATE_PFN_MIGRATE))
+			continue;
+
+		dpage = hmm_vma_alloc_locked_page(vma, addr);
+		if (!dpage) {
+			dst_pfns[c] = MIGRATE_PFN_ERROR;
+			continue;
+		}
+
+		copy_highpage(dpage, spage);
+		dst_pfns[c] = migrate_pfn(page_to_pfn(dpage)) |
+			      MIGRATE_PFN_LOCKED;
+	}
+}
+
+static void
+nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
+				    const unsigned long *src_pfns,
+				    const unsigned long *dst_pfns,
+				    unsigned long start,
+				    unsigned long end,
+				    void *private)
+{
+}
+
+static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
+	.alloc_and_copy		= nouveau_dmem_fault_alloc_and_copy,
+	.finalize_and_map	= nouveau_dmem_fault_finalize_and_map,
+};
+
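+/*
+ * HMM fault() callback: called on a CPU fault against a device page. For now
+ * this is a stub that signals SIGBUS instead of migrating the page back to
+ * system memory.
+ */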
+static int
+nouveau_dmem_fault(struct hmm_devmem *devmem,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct hmm_devmem_ops
+nouveau_dmem_devmem_ops = {
+ .free = nouveau_dmem_free,
+ .fault = nouveau_dmem_fault,
+};
+
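+/*
+ * Give a backing VRAM buffer object to one chunk from the empty list: pin a
+ * DMEM_CHUNK_SIZE bo, map it into the client VMM and put the chunk back on
+ * the empty list (at the head on success, at the tail on failure).
+ */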
+static int
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return -EINVAL;
+
+ mutex_lock(&drm->dmem->mutex);
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ return -ENOMEM;
+ }
+
+ list_del(&chunk->list);
+ mutex_unlock(&drm->dmem->mutex);
+
+ ret = nvif_vmm_get(vmm, LAZY, false, 12, 0,
+ DMEM_CHUNK_SIZE, &chunk->vma);
+ if (ret)
+ goto out;
+
+	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+			     &chunk->bo);
+	if (ret)
+		goto out_vma_put;
+
+	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+	if (ret) {
+		nouveau_bo_ref(NULL, &chunk->bo);
+		goto out_vma_put;
+	}
+
+	ret = nouveau_mem_map(nouveau_mem(&chunk->bo->bo.mem), vmm, &chunk->vma);
+	if (ret) {
+		nouveau_bo_unpin(chunk->bo);
+		nouveau_bo_ref(NULL, &chunk->bo);
+		goto out_vma_put;
+	}
+
+	bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
+	goto out;
+
+out_vma_put:
+	nvif_vmm_put(vmm, &chunk->vma);
+
+out:
+ mutex_lock(&drm->dmem->mutex);
+ if (chunk->bo)
+ list_add(&chunk->list, &drm->dmem->chunk_empty);
+ else
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ mutex_unlock(&drm->dmem->mutex);
+
+ return ret;
+}
+
+static struct nouveau_dmem_chunk *
+nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk)
+ return chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+	if (chunk && chunk->bo)
+ return chunk;
+
+ return NULL;
+}
+
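+/*
+ * Allocate npages device pages and store their device pfns in pages[].
+ * Entries that could not be allocated are left at -1UL. Chunks are grown on
+ * demand through nouveau_dmem_chunk_alloc().
+ */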
+int
+nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages)
+{
+ struct nouveau_dmem_chunk *chunk;
+ unsigned long c;
+ int ret;
+
+ memset(pages, 0xff, npages * sizeof(*pages));
+
+ mutex_lock(&drm->dmem->mutex);
+ for (c = 0; c < npages;) {
+ unsigned long i;
+
+ chunk = nouveau_dmem_chunk_first_free_locked(drm);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ ret = nouveau_dmem_chunk_alloc(drm);
+			if (ret) {
+				/* Return a partial allocation; unused entries
+				 * stay at -1UL (see the memset above).
+				 */
+				if (c)
+					return 0;
+				return ret;
+			}
+			mutex_lock(&drm->dmem->mutex);
+			continue;
+ }
+
+ i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
+ while (i < DMEM_CHUNK_NPAGES && c < npages) {
+ pages[c] = chunk->pfn_first + i;
+ set_bit(i, chunk->bitmap);
+ chunk->callocated++;
+ c++;
+
+ i = find_next_zero_bit(chunk->bitmap,
+ DMEM_CHUNK_NPAGES, i);
+ }
+ }
+ mutex_unlock(&drm->dmem->mutex);
+
+ return 0;
+}
+
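+/*
+ * Release device pages previously handed out by nouveau_dmem_pages_alloc();
+ * entries equal to -1UL are skipped and freed entries are reset to -1UL.
+ */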
+void
+nouveau_dmem_pages_free(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages)
+{
+ unsigned long i;
+
+ mutex_lock(&drm->dmem->mutex);
+	for (i = 0; i < npages; ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ unsigned long idx;
+ struct page *page;
+
+ if (pages[i] == -1UL)
+ continue;
+
+ page = pfn_to_page(pages[i]);
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+
+ idx = pages[i] - chunk->pfn_first;
+ clear_bit(idx, chunk->bitmap);
+ WARN_ON(!chunk->callocated);
+ chunk->callocated--;
+ pages[i] = -1UL;
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
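+/* Re-pin the backing buffer object of every chunk after a suspend cycle. */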
+void
+nouveau_dmem_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
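+/* Unpin every chunk's backing buffer object before suspend. */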
+void
+nouveau_dmem_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
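+/*
+ * Tear down: all pages must have been freed (the free and full lists are
+ * expected to be empty), then unpin and release each chunk's bo and VMM
+ * mapping and remove the HMM device memory.
+ */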
+void
+nouveau_dmem_fini(struct nouveau_drm *drm)
+{
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
+ struct nouveau_dmem_chunk *chunk, *tmp;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+
+ WARN_ON(!list_empty(&drm->dmem->chunk_free));
+ WARN_ON(!list_empty(&drm->dmem->chunk_full));
+
+ list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
+ if (chunk->bo) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
+ }
+ nvif_vmm_put(vmm, &chunk->vma);
+ list_del(&chunk->list);
+ kfree(chunk);
+ }
+
+ mutex_unlock(&drm->dmem->mutex);
+
+ hmm_devmem_remove(drm->dmem->devmem);
+ kfree(drm->dmem);
+}
+
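+/*
+ * Probe for a copy engine class to use for migration copies. The method
+ * table below mirrors the one used for TTM buffer moves in nouveau_bo.c.
+ * FIXME: this still stores the result in drm->ttm rather than in
+ * drm->dmem->migrate.
+ */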
+static void
+nouveau_dmem_migrate_init(struct nouveau_drm *drm)
+{
+ static const struct {
+ const char *name;
+ int engine;
+ s32 oclass;
+ int (*exec)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+ int (*init)(struct nouveau_channel *, u32 handle);
+ } _methods[] = {
+ { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+ { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+ { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+ { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+ { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+ {},
+ { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+ }, *mthd = _methods;
+ const char *name = "CPU";
+ int ret;
+
+ do {
+ struct nouveau_channel *chan;
+
+ if (mthd->engine)
+ chan = drm->cechan;
+ else
+ chan = drm->channel;
+ if (chan == NULL)
+ continue;
+
+ ret = nvif_object_init(&chan->user,
+ mthd->oclass | (mthd->engine << 16),
+ mthd->oclass, NULL, 0,
+ &drm->ttm.copy);
+ if (ret == 0) {
+ ret = mthd->init(chan, drm->ttm.copy.handle);
+ if (ret) {
+ nvif_object_fini(&drm->ttm.copy);
+ continue;
+ }
+
+ drm->ttm.move = mthd->exec;
+ drm->ttm.chan = chan;
+ name = mthd->name;
+ break;
+ }
+ } while ((++mthd)->exec);
+
+	NV_INFO(drm, "DMEM: using %s for migration copies\n", name);
+}
+
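+/*
+ * Register the device VRAM with HMM (Pascal and newer only), carve it into
+ * DMEM_CHUNK_SIZE chunks and tag every device page with its owning chunk.
+ */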
+void
+nouveau_dmem_init(struct nouveau_drm *drm)
+{
+ struct device *device = drm->dev->dev;
+ unsigned long i, size;
+
+	/* This only makes sense on Pascal or newer. */
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
+ return;
+
+ if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
+ return;
+
+ mutex_init(&drm->dmem->mutex);
+ INIT_LIST_HEAD(&drm->dmem->chunk_free);
+ INIT_LIST_HEAD(&drm->dmem->chunk_full);
+ INIT_LIST_HEAD(&drm->dmem->chunk_empty);
+
+ size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+
+	/*
+	 * FIXME: we need some kind of policy to decide how much VRAM we
+	 * want to register with HMM. For now just register everything,
+	 * and later, if we want to do things like overcommit, we can
+	 * revisit this.
+	 */
+ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device,
+ size, drm);
+ if (drm->dmem->devmem == NULL) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
+ return;
+ }
+
+ for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page;
+ unsigned long j;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (chunk == NULL) {
+ nouveau_dmem_fini(drm);
+ return;
+ }
+
+ chunk->drm = drm;
+ chunk->pfn_first = drm->dmem->devmem->pfn_first;
+ chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+
+ page = pfn_to_page(chunk->pfn_first);
+		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
+			hmm_devmem_page_set_drvdata(page, (unsigned long)chunk);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
new file mode 100644
index 000000000000..b2d5bc3de717
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NOUVEAU_DMEM_H__
+#define __NOUVEAU_DMEM_H__
+#include <nvif/os.h>
+struct drm_device;
+struct drm_file;
+struct nouveau_drm;
+
+void nouveau_dmem_init(struct nouveau_drm *);
+void nouveau_dmem_fini(struct nouveau_drm *);
+void nouveau_dmem_suspend(struct nouveau_drm *);
+void nouveau_dmem_resume(struct nouveau_drm *);
+
+int nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages);
+void nouveau_dmem_pages_free(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d871f6a5d82e..b5e68914c406 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -64,6 +64,7 @@
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
+#include "nouveau_dmem.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -577,6 +578,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_svm_init(drm);
nouveau_accel_init(drm);
+ nouveau_dmem_init(drm);
nouveau_fbcon_init(dev);
nouveau_led_init(dev);
@@ -619,6 +621,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
+ nouveau_dmem_fini(drm);
nouveau_accel_fini(drm);
nouveau_svm_fini(drm);
nouveau_hwmon_fini(dev);
@@ -668,6 +671,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
int ret;
nouveau_svm_suspend(drm);
+ nouveau_dmem_suspend(drm);
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
@@ -744,6 +748,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
}
nouveau_led_resume(dev);
+ nouveau_dmem_resume(drm);
nouveau_svm_resume(drm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eb638c52851e..a00e79fe2f58 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -217,6 +217,8 @@ struct nouveau_drm {
struct dev_pm_domain vga_pm_domain;
struct nouveau_svm *svm;
+
+ struct nouveau_dmem *dmem;
};
static inline struct nouveau_drm *