Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu')
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 437
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c | 91
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c | 99
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c | 46
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c | 46
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c | 44
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c | 242
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c | 94
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c | 69
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c | 88
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c | 58
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c | 73
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c | 78
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 66
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c | 58
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c | 190
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h | 25
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c | 181
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 582
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 1976
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 380
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c | 426
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c | 104
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c | 73
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c | 187
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c | 72
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 635
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c | 51
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c | 89
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c | 45
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c | 141
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c | 112
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c | 230
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c | 387
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 77
45 files changed, 7911 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
new file mode 100644
index 0000000000..a602b0cb5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/mmu/base.o
+nvkm-y += nvkm/subdev/mmu/nv04.o
+nvkm-y += nvkm/subdev/mmu/nv41.o
+nvkm-y += nvkm/subdev/mmu/nv44.o
+nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
+nvkm-y += nvkm/subdev/mmu/gf100.o
+nvkm-y += nvkm/subdev/mmu/gk104.o
+nvkm-y += nvkm/subdev/mmu/gk20a.o
+nvkm-y += nvkm/subdev/mmu/gm200.o
+nvkm-y += nvkm/subdev/mmu/gm20b.o
+nvkm-y += nvkm/subdev/mmu/gp100.o
+nvkm-y += nvkm/subdev/mmu/gp10b.o
+nvkm-y += nvkm/subdev/mmu/gv100.o
+nvkm-y += nvkm/subdev/mmu/tu102.o
+
+nvkm-y += nvkm/subdev/mmu/mem.o
+nvkm-y += nvkm/subdev/mmu/memnv04.o
+nvkm-y += nvkm/subdev/mmu/memnv50.o
+nvkm-y += nvkm/subdev/mmu/memgf100.o
+
+nvkm-y += nvkm/subdev/mmu/vmm.o
+nvkm-y += nvkm/subdev/mmu/vmmnv04.o
+nvkm-y += nvkm/subdev/mmu/vmmnv41.o
+nvkm-y += nvkm/subdev/mmu/vmmnv44.o
+nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
+nvkm-y += nvkm/subdev/mmu/vmmgf100.o
+nvkm-y += nvkm/subdev/mmu/vmmgk104.o
+nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
+nvkm-y += nvkm/subdev/mmu/vmmgm200.o
+nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
+nvkm-y += nvkm/subdev/mmu/vmmgp100.o
+nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
+nvkm-y += nvkm/subdev/mmu/vmmgv100.o
+nvkm-y += nvkm/subdev/mmu/vmmtu102.o
+
+nvkm-y += nvkm/subdev/mmu/umem.o
+nvkm-y += nvkm/subdev/mmu/ummu.o
+nvkm-y += nvkm/subdev/mmu/uvmm.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
new file mode 100644
index 0000000000..ad3b44a9e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ummu.h"
+#include "vmm.h"
+
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
+
+struct nvkm_mmu_ptp {
+ struct nvkm_mmu_pt *pt;
+ struct list_head head;
+ u8 shift;
+ u16 mask;
+ u16 free;
+};
+
+static void
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
+{
+ const int slot = pt->base >> pt->ptp->shift;
+ struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+ /* If there were no free slots in the parent allocation before,
+ * there will be now, so return PTP to the cache.
+ */
+ if (!ptp->free)
+ list_add(&ptp->head, &mmu->ptp.list);
+ ptp->free |= BIT(slot);
+
+ /* If there are no more sub-allocations, destroy the PTP. */
+ if (ptp->free == ptp->mask) {
+ nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+ list_del(&ptp->head);
+ kfree(ptp);
+ }
+
+ kfree(pt);
+}
+
+static struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
+{
+ struct nvkm_mmu_pt *pt;
+ struct nvkm_mmu_ptp *ptp;
+ int slot;
+
+ if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+
+ ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+ if (!ptp) {
+ /* Need to allocate a new parent to sub-allocate from. */
+ if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+ kfree(pt);
+ return NULL;
+ }
+
+ ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+ if (!ptp->pt) {
+ kfree(ptp);
+ kfree(pt);
+ return NULL;
+ }
+
+ ptp->shift = order_base_2(size);
+ slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+ ptp->mask = (1 << slot) - 1;
+ ptp->free = ptp->mask;
+ list_add(&ptp->head, &mmu->ptp.list);
+ }
+ pt->ptp = ptp;
+ pt->sub = true;
+
+ /* Sub-allocate from the parent object, removing the PTP from
+ * the cache if there are no free slots left.
+ */
+ slot = __ffs(ptp->free);
+ ptp->free &= ~BIT(slot);
+ if (!ptp->free)
+ list_del(&ptp->head);
+
+ pt->memory = pt->ptp->pt->memory;
+ pt->base = slot << ptp->shift;
+ pt->addr = pt->ptp->pt->addr + pt->base;
+ return pt;
+}
+
+struct nvkm_mmu_ptc {
+ struct list_head head;
+ struct list_head item;
+ u32 size;
+ u32 refs;
+};
+
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
+{
+ struct nvkm_mmu_ptc *ptc;
+
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ if (ptc->size == size)
+ return ptc;
+ }
+
+ ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+ if (ptc) {
+ INIT_LIST_HEAD(&ptc->item);
+ ptc->size = size;
+ ptc->refs = 0;
+ list_add(&ptc->head, &mmu->ptc.list);
+ }
+
+ return ptc;
+}
+
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
+{
+ struct nvkm_mmu_pt *pt = *ppt;
+ if (pt) {
+ /* Handle sub-allocated page tables. */
+ if (pt->sub) {
+ mutex_lock(&mmu->ptp.mutex);
+ nvkm_mmu_ptp_put(mmu, force, pt);
+ mutex_unlock(&mmu->ptp.mutex);
+ return;
+ }
+
+ /* Either cache or free the object. */
+ mutex_lock(&mmu->ptc.mutex);
+ if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+ list_add_tail(&pt->head, &pt->ptc->item);
+ pt->ptc->refs++;
+ } else {
+ nvkm_memory_unref(&pt->memory);
+ kfree(pt);
+ }
+ mutex_unlock(&mmu->ptc.mutex);
+ }
+}
+
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
+{
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_pt *pt;
+ int ret;
+
+ /* Sub-allocated page table (i.e. GP100 LPT). */
+ if (align < 0x1000) {
+ mutex_lock(&mmu->ptp.mutex);
+ pt = nvkm_mmu_ptp_get(mmu, align, zero);
+ mutex_unlock(&mmu->ptp.mutex);
+ return pt;
+ }
+
+ /* Lookup cache for this page table size. */
+ mutex_lock(&mmu->ptc.mutex);
+ ptc = nvkm_mmu_ptc_find(mmu, size);
+ if (!ptc) {
+ mutex_unlock(&mmu->ptc.mutex);
+ return NULL;
+ }
+
+ /* If there's a free PT in the cache, reuse it. */
+ pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+ if (pt) {
+ if (zero)
+ nvkm_fo64(pt->memory, 0, 0, size >> 3);
+ list_del(&pt->head);
+ ptc->refs--;
+ mutex_unlock(&mmu->ptc.mutex);
+ return pt;
+ }
+ mutex_unlock(&mmu->ptc.mutex);
+
+ /* No such luck, we need to allocate. */
+ if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+ pt->ptc = ptc;
+ pt->sub = false;
+
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ size, align, zero, &pt->memory);
+ if (ret) {
+ kfree(pt);
+ return NULL;
+ }
+
+ pt->base = 0;
+ pt->addr = nvkm_memory_addr(pt->memory);
+ return pt;
+}
+
+void
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
+{
+ struct nvkm_mmu_ptc *ptc;
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ struct nvkm_mmu_pt *pt, *tt;
+ list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+ nvkm_memory_unref(&pt->memory);
+ list_del(&pt->head);
+ kfree(pt);
+ }
+ }
+}
+
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
+{
+ struct nvkm_mmu_ptc *ptc, *ptct;
+
+ list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+ WARN_ON(!list_empty(&ptc->item));
+ list_del(&ptc->head);
+ kfree(ptc);
+ }
+}
+
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
+{
+ mutex_init(&mmu->ptc.mutex);
+ INIT_LIST_HEAD(&mmu->ptc.list);
+ mutex_init(&mmu->ptp.mutex);
+ INIT_LIST_HEAD(&mmu->ptp.list);
+}
+
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
+{
+ if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+ mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+ mmu->type[mmu->type_nr].heap = heap;
+ mmu->type_nr++;
+ }
+}
+
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
+{
+ if (size) {
+ if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+ mmu->heap[mmu->heap_nr].type = type;
+ mmu->heap[mmu->heap_nr].size = size;
+ return mmu->heap_nr++;
+ }
+ }
+ return -EINVAL;
+}
+
+static void
+nvkm_mmu_host(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+ int heap;
+
+ /* Non-mappable system memory. */
+ heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Non-coherent, cached, system memory.
+ *
+ * Block-linear mappings of system memory must be done through
+ * BAR1, and cannot be supported on systems where we're unable
+ * to map BAR1 with write-combining.
+ */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar || device->bar->iomap_uncached)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+ else
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Coherent, cached, system memory.
+ *
+ * Unsupported on systems that cannot provide snooped mappings,
+ * and also for block-linear mappings, which must be done
+ * through BAR1.
+ */
+ type |= NVKM_MEM_COHERENT;
+ if (device->func->cpu_coherent)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+ /* Uncached system memory. */
+ nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
+}
+
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+
+ /* Mixed-memory doesn't support compression or display. */
+ heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+ heap |= NVKM_MEM_COMP;
+ heap |= NVKM_MEM_DISP;
+ heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+ heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+ /* Add non-mappable VRAM types first so that they're preferred
+ * over anything else. Mixed-memory will be slower than other
+ * heaps, so it's prioritised last.
+ */
+ nvkm_mmu_type(mmu, heapU, type);
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+
+ /* Add host memory types next, under the assumption that users
+ * wanting mappable memory want to use them as staging buffers
+ * or the like.
+ */
+ nvkm_mmu_host(mmu);
+
+ /* Mappable VRAM types go last, as they're basically the worst
+ * possible type to ask for unless there's no other choice.
+ */
+ if (device->bar) {
+ /* Write-combined BAR1 access. */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar->iomap_uncached) {
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
+
+ /* Uncached BAR1 access. */
+ type |= NVKM_MEM_COHERENT;
+ type |= NVKM_MEM_UNCACHED;
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
+}
+
+static int
+nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+
+ /* Determine available memory types. */
+ if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+ nvkm_mmu_vram(mmu);
+ else
+ nvkm_mmu_host(mmu);
+
+ if (mmu->func->vmm.global) {
+ int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+ "gart", &mmu->vmm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_mmu_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ if (mmu->func->init)
+ mmu->func->init(mmu);
+ return 0;
+}
+
+static void *
+nvkm_mmu_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+
+ nvkm_vmm_unref(&mmu->vmm);
+
+ nvkm_mmu_ptc_fini(mmu);
+ mutex_destroy(&mmu->mutex);
+ return mmu;
+}
+
+static const struct nvkm_subdev_func
+nvkm_mmu = {
+ .dtor = nvkm_mmu_dtor,
+ .oneinit = nvkm_mmu_oneinit,
+ .init = nvkm_mmu_init,
+};
+
+void
+nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
+{
+ nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
+ mmu->func = func;
+ mmu->dma_bits = func->dma_bits;
+ nvkm_mmu_ptc_init(mmu);
+ mutex_init(&mmu->mutex);
+ mmu->user.ctor = nvkm_ummu_new;
+ mmu->user.base = func->mmu.user;
+}
+
+int
+nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
+{
+ if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mmu_ctor(func, device, type, inst, *pmmu);
+ return 0;
+}
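
Editorial note: the PTC ("page-table cache") and PTP paths in base.c above cover two allocation regimes — 4KiB-aligned tables come from a per-size free list, while smaller tables (e.g. GP100's LPT) are bitmask sub-allocated out of a shared 4KiB parent. A minimal sketch of how a caller might drive the cache follows; example_pt_alloc() is hypothetical, while the two nvkm_mmu_ptc_*() calls and the nvkm_mmu_pt fields are the ones defined above.

        /* Hypothetical caller (not part of the patch): allocate a zeroed
         * 4KiB page table through the cache, then release it with
         * force=false so the backing memory stays on the free list for
         * the next same-size request.
         */
        static int
        example_pt_alloc(struct nvkm_mmu *mmu)
        {
                struct nvkm_mmu_pt *pt;

                /* align >= 0x1000 takes the PTC path; a smaller align
                 * would be sub-allocated from a 4KiB parent via PTP.
                 */
                pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
                if (!pt)
                        return -ENOMEM;

                /* PTEs are written through pt->memory starting at
                 * pt->base; pt->addr is the bus address a parent PDE
                 * would point at.
                 */

                nvkm_mmu_ptc_put(mmu, false, &pt);
                return 0;
        }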
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
new file mode 100644
index 0000000000..ce47a3b97b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+g84_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
new file mode 100644
index 0000000000..7a28b1d49f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+/* Map from compressed to corresponding uncompressed storage type.
+ * The value 0xff represents an invalid storage type.
+ */
+const u8 *
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
+ return kind;
+}
+
+static const struct nvkm_mmu_func
+gf100_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu);
+}
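
Editorial note: the table above maps each of the 256 storage-type ("kind") values to its uncompressed equivalent, with 0xff marking invalid entries. A caller would validate a userspace-supplied kind against it roughly as in the hypothetical sketch below; the mmu->func->kind hook is the one assigned in gf100_mmu above, the wrapper itself is illustrative.

        /* Hypothetical helper: reject a userspace-supplied storage type
         * that has no valid entry in the kind table returned above.
         */
        static bool
        example_kind_valid(struct nvkm_mmu *mmu, u8 kind)
        {
                int count;
                u8 invalid;
                const u8 *table = mmu->func->kind(mmu, &count, &invalid);

                return kind < count && table[kind] != invalid;
        }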
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
new file mode 100644
index 0000000000..34c9b2b821
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk104_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
new file mode 100644
index 0000000000..a7db29c429
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk20a_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
new file mode 100644
index 0000000000..e1696f637a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+const u8 *
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
+ return kind;
+}
+
+static const struct nvkm_mmu_func
+gm200_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm200_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
new file mode 100644
index 0000000000..e6e1a8ad70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gm20b_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm20b_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
new file mode 100644
index 0000000000..daa5ab0f87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp100_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm200_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
new file mode 100644
index 0000000000..edd0bf9a5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp10b_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm20b_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
new file mode 100644
index 0000000000..fb8bdc88d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gv100_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 0000000000..514876d641
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
new file mode 100644
index 0000000000..92e363dbbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
+#include "mem.h"
+
+#include <core/memory.h>
+
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+struct nvkm_mem {
+ struct nvkm_memory memory;
+ enum nvkm_memory_target target;
+ struct nvkm_mmu *mmu;
+ u64 pages;
+ struct page **mem;
+ union {
+ struct scatterlist *sgl;
+ dma_addr_t *dma;
+ };
+};
+
+static enum nvkm_memory_target
+nvkm_mem_target(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->target;
+}
+
+static u8
+nvkm_mem_page(struct nvkm_memory *memory)
+{
+ return PAGE_SHIFT;
+}
+
+static u64
+nvkm_mem_addr(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->pages == 1 && mem->mem)
+ return mem->dma[0];
+ return ~0ULL;
+}
+
+static u64
+nvkm_mem_size(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->pages << PAGE_SHIFT;
+}
+
+static int
+nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .dma = mem->dma,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static void *
+nvkm_mem_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ while (mem->pages--) {
+ dma_unmap_page(mem->mmu->subdev.device->dev,
+ mem->dma[mem->pages], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(mem->mem[mem->pages]);
+ }
+ kvfree(mem->dma);
+ kvfree(mem->mem);
+ }
+ return mem;
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_dma = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_dma,
+};
+
+static int
+nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .sgl = mem->sgl,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_sgl = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_sgl,
+};
+
+int
+nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
+ return *pmap ? 0 : -EFAULT;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct device *dev = mmu->subdev.device->dev;
+ union {
+ struct nvif_mem_ram_vn vn;
+ struct nvif_mem_ram_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ enum nvkm_memory_target target;
+ struct nvkm_mem *mem;
+ gfp_t gfp = GFP_USER | __GFP_ZERO;
+
+ if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
+ !(mmu->type[type].type & NVKM_MEM_UNCACHED))
+ target = NVKM_MEM_TARGET_HOST;
+ else
+ target = NVKM_MEM_TARGET_NCOH;
+
+ if (page != PAGE_SHIFT)
+ return -EINVAL;
+
+ if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+ return -ENOMEM;
+ mem->target = target;
+ mem->mmu = mmu;
+ *pmemory = &mem->memory;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if (args->v0.dma) {
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ mem->dma = args->v0.dma;
+ } else {
+ nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
+ mem->sgl = args->v0.sgl;
+ }
+
+ if (!IS_ALIGNED(size, PAGE_SIZE))
+ return -EINVAL;
+ mem->pages = size >> PAGE_SHIFT;
+ return 0;
+ } else
+ if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ kfree(mem);
+ return ret;
+ }
+
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
+ return -ENOMEM;
+ if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (mmu->dma_bits > 32)
+ gfp |= GFP_HIGHUSER;
+ else
+ gfp |= GFP_DMA32;
+
+ for (mem->pages = 0; size; size--, mem->pages++) {
+ struct page *p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
+ p, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, mem->dma[mem->pages])) {
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ mem->mem[mem->pages] = p;
+ }
+
+ return 0;
+}
+
+int
+nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct nvkm_memory *memory = NULL;
+ int ret;
+
+ if (mmu->type[type].type & NVKM_MEM_VRAM) {
+ ret = mmu->func->mem.vram(mmu, type, page, size,
+ argv, argc, &memory);
+ } else {
+ ret = nvkm_mem_new_host(mmu, type, page, size,
+ argv, argc, &memory);
+ }
+
+ if (ret)
+ nvkm_memory_unref(&memory);
+ *pmemory = memory;
+ return ret;
+}
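
Editorial note: nvkm_mem_new_type() dispatches between VRAM (mmu->func->mem.vram) and the host path above, which either wraps caller-provided DMA/SG lists or allocates and dma-maps its own pages. For objects that own their pages, nvkm_mem_map_host() exposes them to the CPU via vmap(). A hedged sketch of a consumer follows; example_clear_host_mem() is hypothetical, the nvkm_* calls are from this file and core/memory.

        /* Hypothetical consumer: kernel-map a host-memory object and zero
         * it. nvkm_mem_map_host() only succeeds when the object allocated
         * its own pages (mem->mem != NULL); caller-provided DMA/SG
         * objects return -EINVAL.
         */
        static int
        example_clear_host_mem(struct nvkm_memory *memory)
        {
                void *map;
                int ret = nvkm_mem_map_host(memory, &map);

                if (ret)
                        return ret;

                memset(map, 0, nvkm_memory_size(memory));
                vunmap(map); /* balances the vmap() in nvkm_mem_map_host() */
                return 0;
        }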
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
new file mode 100644
index 0000000000..234267e1b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_MEM_H__
+#define __NVKM_MEM_H__
+#include "priv.h"
+
+int nvkm_mem_new_type(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+int nvkm_mem_map_host(struct nvkm_memory *, void **pmap);
+
+int nv04_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv04_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int nv50_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv50_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int gf100_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int gf100_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
new file mode 100644
index 0000000000..d9c9bee452
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+int
+gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct gf100_vmm_map_v0 uvmm = {};
+ union {
+ struct gf100_mem_map_vn vn;
+ struct gf100_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, nvkm_memory_page(memory),
+ nvkm_memory_size(memory), pvma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return 0;
+}
+
+int
+gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct gf100_mem_vn vn;
+ struct gf100_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ contig = false;
+ } else
+ return ret;
+
+ if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_MIXED;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, contig, false, pmemory);
+}
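
Editorial note: gf100_mem_map() makes VRAM CPU-visible in three steps — reserve a window in the BAR1 VMM, map the object into it, then report the bus address as resource_addr(BAR1) + vma->addr. A consumer might turn that window into a CPU mapping along the lines below; the ioremap_wc() usage is an illustrative assumption, not taken from this patch, and passing argc == 0 selecting the back-compat "vn" argument path is inferred from the unpack logic above.

        /* Hypothetical sketch: map a VRAM object through BAR1 and obtain
         * a write-combined CPU pointer to it.
         */
        static void __iomem *
        example_bar1_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory,
                         struct nvkm_vma **pvma)
        {
                struct gf100_mem_map_vn args = {};
                u64 addr, size;

                if (gf100_mem_map(mmu, memory, &args, 0, &addr, &size, pvma))
                        return NULL;

                return ioremap_wc(addr, size);
        }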
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
new file mode 100644
index 0000000000..79a3b0cc9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/fb.h>
+
+#include <nvif/if000b.h>
+#include <nvif/unpack.h>
+
+int
+nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ union {
+ struct nv04_mem_map_vn vn;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ const u64 addr = nvkm_memory_addr(memory);
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + addr;
+ *psize = nvkm_memory_size(memory);
+ *pvma = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+int
+nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv04_mem_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_NOMAP;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, true, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
new file mode 100644
index 0000000000..46759b89fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+int
+nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct nv50_vmm_map_v0 uvmm = {};
+ union {
+ struct nv50_mem_map_vn vn;
+ struct nv50_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ u64 size = nvkm_memory_size(memory);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ uvmm.comp = args->v0.comp;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, 12, size, pvma);
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+}
+
+int
+nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv50_mem_vn vn;
+ struct nv50_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ type = args->v0.bankswz ? 0x02 : 0x01;
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ type = 0x01;
+ contig = false;
+ } else
+ return -ENOSYS;
+
+ return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
+ page, size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
new file mode 100644
index 0000000000..0674aa8f68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_mmu_func
+nv04_mmu = {
+ .dma_bits = 32,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
+};
+
+int
+nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
new file mode 100644
index 0000000000..909f92b728
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static void
+nv41_mmu_init(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
+ nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x100820, 0x00000000);
+}
+
+static const struct nvkm_mmu_func
+nv41_mmu = {
+ .init = nv41_mmu_init,
+ .dma_bits = 39,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
+};
+
+int
+nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (device->type == NVKM_DEVICE_AGP ||
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+ return nv04_mmu_new(device, type, inst, pmmu);
+
+ return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
new file mode 100644
index 0000000000..dd2a8d461d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static void
+nv44_mmu_init(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
+ u32 addr;
+
+	/* Calculate the VRAM address of this PRAMIN block. The object
+	 * must be allocated with 512KiB alignment, and must not exceed
+	 * 512KiB in total size, for this to work correctly.
+	 */
+ addr = nvkm_rd32(device, 0x10020c);
+ addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
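+	/* For example (hypothetical values): if 0x10020c reads 0x10000000
+	 * (256MiB) and the PT sits at VRAM offset 0x0ff00000, then
+	 * ((0x0ff00000 >> 19) + 1) << 19 = 0x0ff80000, giving
+	 * addr = 0x00080000.
+	 */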
+
+ nvkm_wr32(device, 0x100850, 0x80000000);
+ nvkm_wr32(device, 0x100818, mmu->vmm->null);
+ nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
+ nvkm_wr32(device, 0x100850, 0x00008000);
+ nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+ nvkm_wr32(device, 0x100820, 0x00000000);
+ nvkm_wr32(device, 0x10082c, 0x00000001);
+ nvkm_wr32(device, 0x100800, addr | 0x00000010);
+}
+
+static const struct nvkm_mmu_func
+nv44_mmu = {
+ .init = nv44_mmu_init,
+ .dma_bits = 39,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
+};
+
+int
+nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ if (device->type == NVKM_DEVICE_AGP ||
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+ return nv04_mmu_new(device, type, inst, pmmu);
+
+ return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
new file mode 100644
index 0000000000..78d46e35d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+const u8 *
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
+{
+ /* 0x01: no bank swizzle
+ * 0x02: bank swizzled
+ * 0x7f: invalid
+ *
+ * 0x01/0x02 are values understood by the VRAM allocator,
+ * and are required to avoid mixing the two types within
+ * a certain range.
+ */
+ static const u8
+ kind[128] = {
+ 0x01, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x00 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x10 */
+ 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x20 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x30 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, /* 0x40 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x01, 0x01, 0x01, 0x7f, /* 0x50 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x60 */
+ 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
+ 0x01, 0x7f, 0x02, 0x7f, 0x01, 0x7f, 0x02, 0x7f, /* 0x70 */
+ 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0x7f;
+ return kind;
+}
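+
+/* A minimal sketch, with a hypothetical caller and "userkind" value, of
+ * how a kind table like the above is consumed to validate user input:
+ *
+ *	int count; u8 invalid;
+ *	const u8 *kind = mmu->func->kind(mmu, &count, &invalid);
+ *	if (userkind >= count || kind[userkind] == invalid)
+ *		return -EINVAL;
+ */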
+
+static const struct nvkm_mmu_func
+nv50_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
+ .kind = nv50_mmu_kind,
+};
+
+int
+nv50_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 0000000000..5265bf4d83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_MMU_PRIV_H__
+#define __NVKM_MMU_PRIV_H__
+#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
+#include <subdev/mmu.h>
+
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu **);
+
+struct nvkm_mmu_func {
+ void (*init)(struct nvkm_mmu *);
+
+ u8 dma_bits;
+
+ struct {
+ struct nvkm_sclass user;
+ } mmu;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+ int (*umap)(struct nvkm_mmu *, struct nvkm_memory *, void *argv,
+ u32 argc, u64 *addr, u64 *size, struct nvkm_vma **);
+ } mem;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*ctor)(struct nvkm_mmu *, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *,
+ const char *name, struct nvkm_vmm **);
+ bool global;
+ u32 pd_offset;
+ } vmm;
+
+ const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
+ bool kind_sys;
+};
+
+extern const struct nvkm_mmu_func nv04_mmu;
+
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
+
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
+
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+
+struct nvkm_mmu_pt {
+ union {
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_ptp *ptp;
+ };
+ struct nvkm_memory *memory;
+ bool sub;
+ u16 base;
+ u64 addr;
+ struct list_head head;
+};
+
+void nvkm_mmu_ptc_dump(struct nvkm_mmu *);
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *, u32 size, u32 align, bool zero);
+void nvkm_mmu_ptc_put(struct nvkm_mmu *, bool force, struct nvkm_mmu_pt **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
new file mode 100644
index 0000000000..8d060ce47f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+ 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0x07;
+ return kind;
+}
+
+static const struct nvkm_mmu_func
+tu102_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
+ .kind = tu102_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
new file mode 100644
index 0000000000..e530bb8b3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_umem;
+struct nvkm_memory *
+nvkm_umem_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_client *master = client->object.client;
+ struct nvkm_memory *memory = NULL;
+ struct nvkm_object *object;
+ struct nvkm_umem *umem;
+
+ object = nvkm_object_search(client, handle, &nvkm_umem);
+ if (IS_ERR(object)) {
+ if (client != master) {
+ spin_lock(&master->lock);
+ list_for_each_entry(umem, &master->umem, head) {
+ if (umem->object.object == handle) {
+ memory = nvkm_memory_ref(umem->memory);
+ break;
+ }
+ }
+ spin_unlock(&master->lock);
+ }
+ } else {
+ umem = nvkm_umem(object);
+ memory = nvkm_memory_ref(umem->memory);
+ }
+
+ return memory ? memory : ERR_PTR(-ENOENT);
+}
+
+static int
+nvkm_umem_unmap(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+
+ if (!umem->map)
+ return -EEXIST;
+
+ if (umem->io) {
+ if (!IS_ERR(umem->bar)) {
+ struct nvkm_device *device = umem->mmu->subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
+ } else {
+ umem->bar = NULL;
+ }
+ } else {
+ vunmap(umem->map);
+ umem->map = NULL;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *handle, u64 *length)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ struct nvkm_mmu *mmu = umem->mmu;
+
+ if (!umem->mappable)
+ return -EINVAL;
+ if (umem->map)
+ return -EEXIST;
+
+ if ((umem->type & NVKM_MEM_HOST) && !argc) {
+ int ret = nvkm_mem_map_host(umem->memory, &umem->map);
+ if (ret)
+ return ret;
+
+ *handle = (unsigned long)(void *)umem->map;
+ *length = nvkm_memory_size(umem->memory);
+ *type = NVKM_OBJECT_MAP_VA;
+ return 0;
+ } else
+ if ((umem->type & NVKM_MEM_VRAM) ||
+ (umem->type & NVKM_MEM_KIND)) {
+ int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
+ handle, length, &umem->bar);
+ if (ret)
+ return ret;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ } else {
+ return -EINVAL;
+ }
+
+ umem->io = (*type == NVKM_OBJECT_MAP_IO);
+ return 0;
+}
+
+static void *
+nvkm_umem_dtor(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ spin_lock(&umem->object.client->lock);
+ list_del_init(&umem->head);
+ spin_unlock(&umem->object.client->lock);
+ nvkm_memory_unref(&umem->memory);
+ return umem;
+}
+
+static const struct nvkm_object_func
+nvkm_umem = {
+ .dtor = nvkm_umem_dtor,
+ .map = nvkm_umem_map,
+ .unmap = nvkm_umem_unmap,
+};
+
+int
+nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ union {
+ struct nvif_mem_v0 v0;
+ } *args = argv;
+ struct nvkm_umem *umem;
+ int type, ret = -ENOSYS;
+ u8 page;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ type = args->v0.type;
+ page = args->v0.page;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (type >= mmu->type_nr)
+ return -EINVAL;
+
+ if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
+ umem->mmu = mmu;
+ umem->type = mmu->type[type].type;
+ INIT_LIST_HEAD(&umem->head);
+ *pobject = &umem->object;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
+ page = max_t(u8, page, PAGE_SHIFT);
+ umem->mappable = true;
+ }
+
+ ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
+ &umem->memory);
+ if (ret)
+ return ret;
+
+ spin_lock(&umem->object.client->lock);
+ list_add(&umem->head, &umem->object.client->umem);
+ spin_unlock(&umem->object.client->lock);
+
+ args->v0.page = nvkm_memory_page(umem->memory);
+ args->v0.addr = nvkm_memory_addr(umem->memory);
+ args->v0.size = nvkm_memory_size(umem->memory);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
new file mode 100644
index 0000000000..d56a594016
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -0,0 +1,25 @@
+#ifndef __NVKM_UMEM_H__
+#define __NVKM_UMEM_H__
+#define nvkm_umem(p) container_of((p), struct nvkm_umem, object)
+#include <core/object.h>
+#include "mem.h"
+
+struct nvkm_umem {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+ u8 type:8;
+ bool mappable:1;
+ bool io:1;
+
+ struct nvkm_memory *memory;
+ struct list_head head;
+
+ union {
+ struct nvkm_vma *bar;
+ void *map;
+ };
+};
+
+int nvkm_umem_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
new file mode 100644
index 0000000000..6870fda4b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ummu.h"
+#include "umem.h"
+#include "uvmm.h"
+
+#include <core/client.h>
+
+#include <nvif/if0008.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ummu_sclass(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
+
+ if (mmu->func->mem.user.oclass) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->mem.user;
+ oclass->ctor = nvkm_umem_new;
+ return 0;
+ }
+ }
+
+ if (mmu->func->vmm.user.oclass) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->vmm.user;
+ oclass->ctor = nvkm_uvmm_new;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_heap_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->heap_nr)
+ return -EINVAL;
+ args->v0.size = mmu->heap[index].size;
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_type_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 type, index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->type_nr)
+ return -EINVAL;
+ type = mmu->type[index].type;
+ args->v0.heap = mmu->type[index].heap;
+ args->v0.vram = !!(type & NVKM_MEM_VRAM);
+ args->v0.host = !!(type & NVKM_MEM_HOST);
+ args->v0.comp = !!(type & NVKM_MEM_COMP);
+ args->v0.disp = !!(type & NVKM_MEM_DISP);
+ args->v0.kind = !!(type & NVKM_MEM_KIND);
+ args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
+ args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
+ args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_kind_v0 v0;
+ } *args = argv;
+ const u8 *kind = NULL;
+ int ret = -ENOSYS, count = 0;
+ u8 kind_inv = 0;
+
+ if (mmu->func->kind)
+ kind = mmu->func->kind(mmu, &count, &kind_inv);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ if (argc != args->v0.count * sizeof(*args->v0.data))
+ return -EINVAL;
+ if (args->v0.count > count)
+ return -EINVAL;
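+		/* For example (hypothetical values): against a 16-entry
+		 * kind table, a request with args->v0.count = 8 must size
+		 * argc for exactly eight data[] entries, and gets back
+		 * kind[0..7] only.
+		 */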
+ args->v0.kind_inv = kind_inv;
+ memcpy(args->v0.data, kind, args->v0.count);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_ummu *ummu = nvkm_ummu(object);
+ switch (mthd) {
+ case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
+ case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
+ case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_ummu = {
+ .mthd = nvkm_ummu_mthd,
+ .sclass = nvkm_ummu_sclass,
+};
+
+int
+nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ union {
+ struct nvif_mmu_v0 v0;
+ } *args = argv;
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_ummu *ummu;
+ int ret = -ENOSYS, kinds = 0;
+ u8 unused = 0;
+
+ if (mmu->func->kind)
+ mmu->func->kind(mmu, &kinds, &unused);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ args->v0.dmabits = mmu->dma_bits;
+ args->v0.heap_nr = mmu->heap_nr;
+ args->v0.type_nr = mmu->type_nr;
+ args->v0.kind_nr = kinds;
+ } else
+ return ret;
+
+ if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
+ ummu->mmu = mmu;
+ *pobject = &ummu->object;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
new file mode 100644
index 0000000000..0cd510dcfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UMMU_H__
+#define __NVKM_UMMU_H__
+#define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
+#include <core/object.h>
+#include "priv.h"
+
+struct nvkm_ummu {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+};
+
+int nvkm_ummu_new(struct nvkm_device *, const struct nvkm_oclass *,
+ void *argv, u32 argc, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
new file mode 100644
index 0000000000..8e459d88ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "uvmm.h"
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+
+#include <nvif/if000c.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_uvmm;
+struct nvkm_vmm *
+nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_object *object;
+
+ object = nvkm_object_search(client, handle, &nvkm_uvmm);
+ if (IS_ERR(object))
+ return (void *)object;
+
+ return nvkm_vmm_ref(nvkm_uvmm(object)->vmm);
+}
+
+static int
+nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_pfnclr_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ int ret = -ENOSYS;
+ u64 addr, size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
+ if (size) {
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
+ mutex_unlock(&vmm->mutex.vmm);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_pfnmap_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ int ret = -ENOSYS;
+ u64 addr, size, *phys;
+ u8 page;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ page = args->v0.page;
+ addr = args->v0.addr;
+ size = args->v0.size;
+ phys = args->v0.phys;
+ if (argc != (size >> page) * sizeof(args->v0.phys[0]))
+ return -EINVAL;
+ } else
+ return ret;
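+
+	/* For example (hypothetical values): size = 0x4000 with page = 12
+	 * (4KiB pages) covers four pages, so the remaining argc after
+	 * unpacking must hold exactly four phys[] entries.
+	 */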
+
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
+ if (size) {
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
+ mutex_unlock(&vmm->mutex.vmm);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_unmap_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
+ return -EINVAL;
+
+ mutex_lock(&vmm->mutex.vmm);
+ vma = nvkm_vmm_node_search(vmm, addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx",
+ addr, vma ? vma->addr : ~0ULL);
+ goto done;
+ }
+
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
+ goto done;
+ }
+
+ if (ret = -EINVAL, !vma->memory) {
+ VMM_DEBUG(vmm, "unmapped");
+ goto done;
+ }
+
+ nvkm_vmm_unmap_locked(vmm, vma, false);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_map_v0 v0;
+ } *args = argv;
+ u64 addr, size, handle, offset;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ struct nvkm_memory *memory;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ handle = args->v0.memory;
+ offset = args->v0.offset;
+ } else
+ return ret;
+
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
+ memory = nvkm_umem_search(client, handle);
+ if (IS_ERR(memory)) {
+		VMM_DEBUG(vmm, "memory %016llx %ld", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ mutex_lock(&vmm->mutex.vmm);
+ if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
+ VMM_DEBUG(vmm, "lookup %016llx", addr);
+ goto fail;
+ }
+
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
+ goto fail;
+ }
+
+ if (ret = -EINVAL, vma->mapped && !vma->memory) {
+ VMM_DEBUG(vmm, "pfnmap %016llx", addr);
+ goto fail;
+ }
+
+ if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
+ if (addr + size > vma->addr + vma->size || vma->memory ||
+ (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
+ VMM_DEBUG(vmm, "split %d %d %d "
+ "%016llx %016llx %016llx %016llx",
+ !!vma->memory, vma->refd, vma->mapref,
+ addr, size, vma->addr, (u64)vma->size);
+ goto fail;
+ }
+
+ vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+ vma->busy = true;
+ mutex_unlock(&vmm->mutex.vmm);
+
+ ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
+ if (ret == 0) {
+ /* Successful map will clear vma->busy. */
+ nvkm_memory_unref(&memory);
+ return 0;
+ }
+
+ mutex_lock(&vmm->mutex.vmm);
+ vma->busy = false;
+ nvkm_vmm_unmap_region(vmm, vma);
+fail:
+ mutex_unlock(&vmm->mutex.vmm);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_put_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex.vmm);
+	vma = nvkm_vmm_node_search(vmm, addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
+ vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
+ goto done;
+ }
+
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
+ goto done;
+ }
+
+ nvkm_vmm_put_locked(vmm, vma);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_get_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ bool getref, mapref, sparse;
+ u8 page, align;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
+ mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
+ sparse = args->v0.sparse;
+ page = args->v0.page;
+ align = args->v0.align;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
+ page, align, size, &vma);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ args->v0.addr = vma->addr;
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_page_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ int ret = -ENOSYS;
+ u8 type, index, nr;
+
+ page = uvmm->vmm->func->page;
+ for (nr = 0; page[nr].shift; nr++);
+
+ if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= nr)
+ return -EINVAL;
+ type = page[index].type;
+ args->v0.shift = page[index].shift;
+ args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
+ args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
+ args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
+ args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
+ } else
+ return -ENOSYS;
+
+ return 0;
+}
+
+static inline int
+nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ const struct nvkm_vmm_page *page;
+
+ if (likely(shift)) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+ *refd = page - vmm->func->page;
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
+}
+
+static int
+nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma vma = {
+ .addr = args->addr,
+ .size = args->size,
+ .used = true,
+ .mapref = false,
+ .no_comp = true,
+ };
+ struct nvkm_memory *memory;
+ void *argv = (void *)(uintptr_t)args->argv;
+ unsigned int argc = args->argc;
+ u64 handle = args->memory;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ vma.page = vma.refd = refd;
+
+ memory = nvkm_umem_search(client, args->memory);
+ if (IS_ERR(memory)) {
+		VMM_DEBUG(vmm, "memory %016llx %ld", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
+
+ nvkm_memory_unref(&vma.memory);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
+ args->sparse, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
+}
+
+static int
+nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_raw_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!uvmm->vmm->managed.raw)
+ return -EINVAL;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.op) {
+ case NVIF_VMM_RAW_V0_GET:
+ return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_PUT:
+ return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_MAP:
+ return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_UNMAP:
+ return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_SPARSE:
+ return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
+ default:
+ return -EINVAL;
+	}
+}
+
+static int
+nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ switch (mthd) {
+ case NVIF_VMM_V0_PAGE : return nvkm_uvmm_mthd_page (uvmm, argv, argc);
+ case NVIF_VMM_V0_GET : return nvkm_uvmm_mthd_get (uvmm, argv, argc);
+ case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
+ case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
+ case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+ case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
+ case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+ case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
+ case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
+ if (uvmm->vmm->func->mthd) {
+ return uvmm->vmm->func->mthd(uvmm->vmm,
+ uvmm->object.client,
+ mthd, argv, argc);
+ }
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void *
+nvkm_uvmm_dtor(struct nvkm_object *object)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ nvkm_vmm_unref(&uvmm->vmm);
+ return uvmm;
+}
+
+static const struct nvkm_object_func
+nvkm_uvmm = {
+ .dtor = nvkm_uvmm_dtor,
+ .mthd = nvkm_uvmm_mthd,
+};
+
+int
+nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ const bool more = oclass->base.maxver >= 0;
+ union {
+ struct nvif_vmm_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ struct nvkm_uvmm *uvmm;
+ int ret = -ENOSYS;
+ u64 addr, size;
+ bool managed, raw;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+ managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
+ raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
+ addr = args->v0.addr;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
+ *pobject = &uvmm->object;
+
+ if (!mmu->vmm) {
+ ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
+ argv, argc, NULL, "user", &uvmm->vmm);
+ if (ret)
+ return ret;
+
+ uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
+ } else {
+ if (size)
+ return -EINVAL;
+
+ uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
+ }
+ uvmm->vmm->managed.raw = raw;
+
+ page = uvmm->vmm->func->page;
+ args->v0.page_nr = 0;
+ while (page && (page++)->shift)
+ args->v0.page_nr++;
+ args->v0.addr = uvmm->vmm->start;
+ args->v0.size = uvmm->vmm->limit;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
new file mode 100644
index 0000000000..71dab55e18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UVMM_H__
+#define __NVKM_UVMM_H__
+#define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
+#include <core/object.h>
+#include "vmm.h"
+
+struct nvkm_uvmm {
+ struct nvkm_object object;
+ struct nvkm_vmm *vmm;
+};
+
+int nvkm_uvmm_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
new file mode 100644
index 0000000000..eb5fcadcb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -0,0 +1,1976 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define NVKM_VMM_LEVELS_MAX 5
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+static void
+nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
+{
+ struct nvkm_vmm_pt *pgt = *ppgt;
+ if (pgt) {
+ kvfree(pgt->pde);
+ kfree(pgt);
+ *ppgt = NULL;
+ }
+}
+
+static struct nvkm_vmm_pt *
+nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
+ const struct nvkm_vmm_page *page)
+{
+ const u32 pten = 1 << desc->bits;
+ struct nvkm_vmm_pt *pgt;
+ u32 lpte = 0;
+
+ if (desc->type > PGT) {
+ if (desc->type == SPT) {
+ const struct nvkm_vmm_desc *pair = page[-1].desc;
+ lpte = pten >> (desc->bits - pair->bits);
+ } else {
+ lpte = pten;
+ }
+ }
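+
+	/* For example (hypothetical widths): an SPT with desc->bits = 8
+	 * (256 small PTEs) paired with an LPT of pair->bits = 5 needs
+	 * lpte = 256 >> 3 = 32 trailing counters, one per LPTE covering
+	 * the same address range.
+	 */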
+
+ if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
+ return NULL;
+ pgt->page = page ? page->shift : 0;
+ pgt->sparse = sparse;
+
+ if (desc->type == PGD) {
+ pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
+ if (!pgt->pde) {
+ kfree(pgt);
+ return NULL;
+ }
+ }
+
+ return pgt;
+}
+
+struct nvkm_vmm_iter {
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm *vmm;
+ u64 cnt;
+ u16 max, lvl;
+ u32 pte[NVKM_VMM_LEVELS_MAX];
+ struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
+ int flush;
+};
+
+#ifdef CONFIG_NOUVEAU_DEBUG_MMU
+static const char *
+nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
+{
+ switch (desc->type) {
+ case PGD: return "PGD";
+ case PGT: return "PGT";
+ case SPT: return "SPT";
+ case LPT: return "LPT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void
+nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
+{
+ int lvl;
+ for (lvl = it->max; lvl >= 0; lvl--) {
+ if (lvl >= it->lvl)
+ buf += sprintf(buf, "%05x:", it->pte[lvl]);
+ else
+ buf += sprintf(buf, "xxxxx:");
+ }
+}
+
+#define TRA(i,f,a...) do { \
+ char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
+ struct nvkm_vmm_iter *_it = (i); \
+ nvkm_vmm_trace(_it, _buf); \
+ VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
+} while(0)
+#else
+#define TRA(i,f,a...)
+#endif
+
+static inline void
+nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
+{
+ it->flush = min(it->flush, it->max - it->lvl);
+}
+
+static inline void
+nvkm_vmm_flush(struct nvkm_vmm_iter *it)
+{
+ if (it->flush != NVKM_VMM_LEVELS_MAX) {
+ if (it->vmm->func->flush) {
+ TRA(it, "flush: %d", it->flush);
+ it->vmm->func->flush(it->vmm, it->flush);
+ }
+ it->flush = NVKM_VMM_LEVELS_MAX;
+ }
+}
+
+static void
+nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc[it->lvl].type == SPT;
+ struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
+ struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 pdei = it->pte[it->lvl + 1];
+
+ /* Recurse up the tree, unreferencing/destroying unneeded PDs. */
+ it->lvl++;
+ if (--pgd->refs[0]) {
+ const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
+ /* PD has other valid PDEs, so we need a proper update. */
+ TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ pgt->pt[type] = NULL;
+ if (!pgt->refs[!type]) {
+ /* PDE no longer required. */
+ if (pgd->pt[0]) {
+ if (pgt->sparse) {
+ func->sparse(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
+ } else {
+ func->unmap(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* Special handling for Tesla-class GPUs,
+ * where there's no central PD, but each
+ * instance has its own embedded PD.
+ */
+ func->pde(vmm, pgd, pdei);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* PDE was pointing at dual-PTs and we're removing
+ * one of them, leaving the other in place.
+ */
+ func->pde(vmm, pgd, pdei);
+ }
+
+ /* GPU may have cached the PTs, flush before freeing. */
+ nvkm_vmm_flush_mark(it);
+ nvkm_vmm_flush(it);
+ } else {
+ /* PD has no valid PDEs left, so we can just destroy it. */
+ nvkm_vmm_unref_pdes(it);
+ }
+
+ /* Destroy PD/PT. */
+ TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
+ if (!pgt->refs[!type])
+ nvkm_vmm_pt_del(&pgt);
+ it->lvl--;
+}
+
+static void
+nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and drop reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] -= pten;
+ ptes -= pten;
+ }
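+	/* For example (hypothetical geometry): with sptb = 3 (8 SPTEs
+	 * per LPTE), ptei = 5 and ptes = 10 decrement pte[0] by 3 and
+	 * pte[1] by 7.
+	 */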
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that still have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
+ break;
+ }
+ continue;
+ }
+
+		/* As there are no more non-UNMAPPED SPTEs left in the range
+ * covered by a number of LPTEs, the LPTEs once again take
+ * control over their address range.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
+ break;
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
+ pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* If the MMU supports it, restore the LPTE to the
+ * INVALID state to tell the MMU there is no point
+ * trying to fetch the corresponding SPTEs.
+ */
+ TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
+ pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+ bool dma;
+
+ if (pfn) {
+ /* Need to clear PTE valid bits before we dma_unmap_page(). */
+ dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
+ if (dma) {
+ /* GPU may have cached the PT, flush before unmap. */
+ nvkm_vmm_flush_mark(it);
+ nvkm_vmm_flush(it);
+ desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
+ }
+ }
+
+ /* Drop PTE references. */
+ pgt->refs[type] -= ptes;
+
+ /* Dual-PTs need special handling, unless PDE becoming invalid. */
+ if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
+ nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
+
+ /* PT no longer needed? Destroy it. */
+ if (!pgt->refs[type]) {
+ it->lvl++;
+ TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false; /* PTE writes for unmap() not necessary. */
+ }
+
+ return true;
+}
+
+static void
+nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and increase reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] += pten;
+ ptes -= pten;
+ }
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that already have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
+ break;
+ }
+ continue;
+ }
+
+ /* As there are now non-UNMAPPED SPTEs in the range covered
+ * by a number of LPTEs, we need to transfer control of the
+ * address range to the SPTEs.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
+ break;
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ const u32 spti = pteb * sptn;
+ const u32 sptc = ptes * sptn;
+			/* The entire LPTE is marked as sparse; we need
+			 * to make sure that the SPTEs are too.
+ */
+ TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
+ desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
+ /* Sparse LPTEs prevent SPTEs from being accessed. */
+ TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* MMU supports blocking SPTEs by marking an LPTE
+ * as INVALID. We need to reverse that here.
+ */
+ TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+
+ /* Take PTE references. */
+ pgt->refs[type] += ptes;
+
+ /* Dual-PTs need special handling. */
+ if (desc->type == SPT)
+ nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
+
+ return true;
+}
+
+static void
+nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
+ struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
+{
+ if (desc->type == PGD) {
+ while (ptes--)
+ pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
+ } else
+ if (desc->type == LPT) {
+ memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
+ }
+}
+
+static bool
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
+{
+ struct nvkm_vmm_pt *pt = it->pt[0];
+ if (it->desc->type == PGD)
+ memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
+ else
+ if (it->desc->type == LPT)
+ memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
+ return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
+{
+ nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
+ return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ const bool zero = !pgt->sparse && !desc->func->invalid;
+ struct nvkm_vmm *vmm = it->vmm;
+ struct nvkm_mmu *mmu = vmm->mmu;
+ struct nvkm_mmu_pt *pt;
+ u32 pten = 1 << desc->bits;
+ u32 pteb, ptei, ptes;
+ u32 size = desc->size * pten;
+
+ pgd->refs[0]++;
+
+ pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
+ if (!pgt->pt[type]) {
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ if (zero)
+ goto done;
+
+ pt = pgt->pt[type];
+
+ if (desc->type == LPT && pgt->refs[1]) {
+		/* An SPT already exists covering the same range as this
+		 * LPT, so we need to be careful that any LPTEs which
+		 * overlap valid SPTEs are left unmapped, as opposed to
+		 * invalid or sparse, which on some GPUs would prevent
+		 * the MMU from looking at the SPTEs.
+ */
+ for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
+ bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
+ bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ if (spte != next)
+ break;
+ }
+
+ if (!spte) {
+ if (pgt->sparse)
+ desc->func->sparse(vmm, pt, pteb, ptes);
+ else
+ desc->func->invalid(vmm, pt, pteb, ptes);
+ memset(&pgt->pte[pteb], 0x00, ptes);
+ } else {
+ desc->func->unmap(vmm, pt, pteb, ptes);
+ while (ptes--)
+ pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
+ }
+ }
+ } else {
+ if (pgt->sparse) {
+ nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
+ desc->func->sparse(vmm, pt, 0, pten);
+ } else {
+ desc->func->invalid(vmm, pt, 0, pten);
+ }
+ }
+
+done:
+ TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
+ it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
+ nvkm_vmm_flush_mark(it);
+ return true;
+}
+
+static bool
+nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+
+ pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
+ if (!pgt) {
+ if (!pgd->refs[0])
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ pgd->pde[pdei] = pgt;
+ return true;
+}
+
+static inline u64
+nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, const char *name, bool ref, bool pfn,
+ bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
+ nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
+ nvkm_vmm_pxe_func CLR_PTES)
+{
+ const struct nvkm_vmm_desc *desc = page->desc;
+ struct nvkm_vmm_iter it;
+ u64 bits = addr >> page->shift;
+
+ it.page = page;
+ it.desc = desc;
+ it.vmm = vmm;
+ it.cnt = size >> page->shift;
+ it.flush = NVKM_VMM_LEVELS_MAX;
+
+ /* Deconstruct address into PTE indices for each mapping level. */
+ for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
+ it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
+ bits >>= desc[it.lvl].bits;
+ }
+ it.max = --it.lvl;
+ it.pt[it.max] = vmm->pd;
+
+ it.lvl = 0;
+ TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
+ addr, size, page->shift, it.cnt);
+ it.lvl = it.max;
+
+ /* Depth-first traversal of page tables. */
+ while (it.cnt) {
+ struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
+ const int type = desc->type == SPT;
+ const u32 pten = 1 << desc->bits;
+ const u32 ptei = it.pte[0];
+ const u32 ptes = min_t(u64, it.cnt, pten - ptei);
+
+ /* Walk down the tree, finding page tables for each level. */
+ for (; it.lvl; it.lvl--) {
+ const u32 pdei = it.pte[it.lvl];
+ struct nvkm_vmm_pt *pgd = pgt;
+
+ /* Software PT. */
+ if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
+ if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
+ goto fail;
+ }
+ it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
+
+ /* Hardware PT.
+ *
+ * This is a separate step from above due to GF100 and
+ * newer having dual page tables at some levels, which
+ * are refcounted independently.
+ */
+ if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
+ if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
+ goto fail;
+ }
+ }
+
+ /* Handle PTE updates. */
+ if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ if (MAP_PTES || CLR_PTES) {
+ if (MAP_PTES)
+ MAP_PTES(vmm, pt, ptei, ptes, map);
+ else
+ CLR_PTES(vmm, pt, ptei, ptes);
+ nvkm_vmm_flush_mark(&it);
+ }
+ }
+
+ /* Walk back up the tree to the next position. */
+ it.pte[it.lvl] += ptes;
+ it.cnt -= ptes;
+ if (it.cnt) {
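+			/* Propagate the increment to higher levels, odometer-style. */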
+ while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
+ it.pte[it.lvl++] = 0;
+ it.pte[it.lvl]++;
+ }
+ }
+ }
+
+ nvkm_vmm_flush(&it);
+ return ~0ULL;
+
+fail:
+ /* Reconstruct the failure address so the caller is able to
+ * reverse any partially completed operations.
+ */
+ addr = it.pte[it.max--];
+ do {
+ addr = addr << desc[it.max].bits;
+ addr |= it.pte[it.max];
+ } while (it.max--);
+
+ return addr << page->shift;
+}
+
+static void
+nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
+ nvkm_vmm_sparse_unref_ptes, NULL, NULL,
+ page->desc->func->invalid ?
+ page->desc->func->invalid : page->desc->func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
+ true, false, nvkm_vmm_sparse_ref_ptes,
+ NULL, NULL, page->desc->func->sparse);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
+ return -ENOMEM;
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ int m = 0, i;
+ u64 start = addr;
+ u64 block;
+
+ while (size) {
+ /* Limit maximum page size based on remaining size. */
+ while (size < (1ULL << page[m].shift))
+ m++;
+ i = m;
+
+ /* Find largest page size suitable for alignment. */
+ while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
+ i++;
+
+ /* Determine number of PTEs at this page size. */
+ if (i != m) {
+ /* Limited to alignment boundary of next page size. */
+ u64 next = 1ULL << page[i - 1].shift;
+ u64 part = ALIGN(addr, next) - addr;
+ if (size - part >= next)
+ block = (part >> page[i].shift) << page[i].shift;
+ else
+ block = (size >> page[i].shift) << page[i].shift;
+ } else {
+ block = (size >> page[i].shift) << page[i].shift;
+ }
+
+ /* Perform operation. */
+ if (ref) {
+ int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
+ if (ret) {
+ if ((size = addr - start))
+ nvkm_vmm_ptes_sparse(vmm, start, size, false);
+ return ret;
+ }
+ } else {
+ nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
+ }
+
+ size -= block;
+ addr += block;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ mutex_lock(&vmm->mutex.map);
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
+ NULL, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+ mutex_unlock(&vmm->mutex.map);
+}
+
+static void
+nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ mutex_lock(&vmm->mutex.map);
+ nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
+ NULL, func, map, NULL);
+ mutex_unlock(&vmm->mutex.map);
+}
+
+static void
+nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
+ nvkm_vmm_unref_ptes, NULL, NULL, NULL);
+}
+
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ mutex_lock(&vmm->mutex.ref);
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
+ mutex_unlock(&vmm->mutex.ref);
+}
+
+static int
+nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ u64 fail;
+
+ mutex_lock(&vmm->mutex.ref);
+ fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
+ nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+ if (fail != ~0ULL) {
+ if (fail != addr)
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
+ mutex_unlock(&vmm->mutex.ref);
+ return -ENOMEM;
+ }
+ mutex_unlock(&vmm->mutex.ref);
+ return 0;
+}
+
+static void
+__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+ false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ if (vmm->managed.raw) {
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
+ nvkm_vmm_ptes_put(vmm, page, addr, size);
+ } else {
+ __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
+ }
+}
+
+static int
+__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+ false, nvkm_vmm_ref_ptes, func, map, NULL);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ int ret;
+
+ if (vmm->managed.raw) {
+ ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
+
+ return 0;
+ } else {
+ return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
+ }
+}
+
+struct nvkm_vma *
+nvkm_vma_new(u64 addr, u64 size)
+{
+ struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma) {
+ vma->addr = addr;
+ vma->size = size;
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ }
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
+{
+ struct nvkm_vma *new;
+
+ BUG_ON(vma->size == tail);
+
+ if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
+ return NULL;
+ vma->size -= tail;
+
+ new->mapref = vma->mapref;
+ new->sparse = vma->sparse;
+ new->page = vma->page;
+ new->refd = vma->refd;
+ new->used = vma->used;
+ new->part = vma->part;
+ new->busy = vma->busy;
+ new->mapped = vma->mapped;
+ list_add(&new->head, &vma->head);
+ return new;
+}
+
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_free_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
+static void
+nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->free.rb_node;
+ struct rb_node *parent = NULL;
+
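+	/* The free tree is keyed by size, then address, enabling best-fit searches. */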
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->size < this->size)
+ ptr = &parent->rb_left;
+ else
+ if (vma->size > this->size)
+ ptr = &parent->rb_right;
+ else
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_node_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
+static void
+nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->root.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->root);
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct rb_node *node = vmm->root.rb_node;
+ while (node) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ if (addr < vma->addr)
+ node = node->rb_left;
+ else
+ if (addr >= vma->addr + vma->size)
+ node = node->rb_right;
+ else
+ return vma;
+ }
+ return NULL;
+}
+
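+/* Neighbouring VMA in address order, or NULL at either end of the list. */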
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
+ list_entry((root)->head.dir, struct nvkm_vma, head))
+
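+/* Fold part or all of 'vma' into the given adjacent nodes, returning
+ * whichever VMA ends up covering the merged range.
+ */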
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+ struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+ if (next) {
+ if (vma->size == size) {
+ vma->size += next->size;
+ nvkm_vmm_node_delete(vmm, next);
+ if (prev) {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ return prev;
+ }
+ return vma;
+ }
+ BUG_ON(prev);
+
+ nvkm_vmm_node_remove(vmm, next);
+ vma->size -= size;
+ next->addr -= size;
+ next->size += size;
+ nvkm_vmm_node_insert(vmm, next);
+ return next;
+ }
+
+ if (prev) {
+ if (vma->size != size) {
+ nvkm_vmm_node_remove(vmm, vma);
+ prev->size += size;
+ vma->addr += size;
+ vma->size -= size;
+ nvkm_vmm_node_insert(vmm, vma);
+ } else {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ }
+ return prev;
+ }
+
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, u64 addr, u64 size)
+{
+ struct nvkm_vma *prev = NULL;
+
+ if (vma->addr != addr) {
+ prev = vma;
+ if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+ return NULL;
+ vma->part = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ }
+
+ if (vma->size != size) {
+ struct nvkm_vma *tmp;
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+ return NULL;
+ }
+ tmp->part = true;
+ nvkm_vmm_node_insert(vmm, tmp);
+ }
+
+ return vma;
+}
+
+static void
+nvkm_vma_dump(struct nvkm_vma *vma)
+{
+ printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
+ vma->addr, (u64)vma->size,
+ vma->used ? '-' : 'F',
+ vma->mapref ? 'R' : '-',
+ vma->sparse ? 'S' : '-',
+ vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
+ vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
+ vma->part ? 'P' : '-',
+ vma->busy ? 'B' : '-',
+ vma->mapped ? 'M' : '-',
+ vma->memory);
+}
+
+static void
+nvkm_vmm_dump(struct nvkm_vmm *vmm)
+{
+ struct nvkm_vma *vma;
+ list_for_each_entry(vma, &vmm->list, head) {
+ nvkm_vma_dump(vma);
+ }
+}
+
+static void
+nvkm_vmm_dtor(struct nvkm_vmm *vmm)
+{
+ struct nvkm_vma *vma;
+ struct rb_node *node;
+
+ if (0)
+ nvkm_vmm_dump(vmm);
+
+ while ((node = rb_first(&vmm->root))) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ nvkm_vmm_put(vmm, &vma);
+ }
+
+ if (vmm->bootstrapped) {
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+
+ while (page[1].shift)
+ page++;
+
+ nvkm_mmu_ptc_dump(vmm->mmu);
+ nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
+ }
+
+ vma = list_first_entry(&vmm->list, typeof(*vma), head);
+ list_del(&vma->head);
+ kfree(vma);
+ WARN_ON(!list_empty(&vmm->list));
+
+ if (vmm->nullp) {
+ dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
+ vmm->nullp, vmm->null);
+ }
+
+ if (vmm->pd) {
+ nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
+ nvkm_vmm_pt_del(&vmm->pd);
+ }
+}
+
+static int
+nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+ struct nvkm_vma *vma;
+ if (!(vma = nvkm_vma_new(addr, size)))
+ return -ENOMEM;
+ vma->mapref = true;
+ vma->sparse = false;
+ vma->used = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ list_add_tail(&vma->head, &vmm->list);
+ return 0;
+}
+
+static int
+nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm *vmm)
+{
+ static struct lock_class_key _key;
+ const struct nvkm_vmm_page *page = func->page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vma *vma;
+ int levels, bits = 0, ret;
+
+ vmm->func = func;
+ vmm->mmu = mmu;
+ vmm->name = name;
+ vmm->debug = mmu->subdev.debug;
+ kref_init(&vmm->kref);
+
+ __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
+ mutex_init(&vmm->mutex.ref);
+ mutex_init(&vmm->mutex.map);
+
+ /* Locate the smallest page size supported by the backend, it will
+ * have the deepest nesting of page tables.
+ */
+ while (page[1].shift)
+ page++;
+
+ /* Locate the structure that describes the layout of the top-level
+ * page table, and determine the number of valid bits in a virtual
+ * address.
+ */
+ for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
+ bits += desc->bits;
+ bits += page->shift;
+ desc--;
+
+ if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
+ return -EINVAL;
+
+ /* Allocate top-level page table. */
+ vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
+ if (!vmm->pd)
+ return -ENOMEM;
+ vmm->pd->refs[0] = 1;
+ INIT_LIST_HEAD(&vmm->join);
+
+ /* ... and the GPU storage for it, except on Tesla-class GPUs that
+ * have the PD embedded in the instance structure.
+ */
+ if (desc->size) {
+ const u32 size = pd_header + desc->size * (1 << desc->bits);
+ vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
+ if (!vmm->pd->pt[0])
+ return -ENOMEM;
+ }
+
+ /* Initialise address-space MM. */
+ INIT_LIST_HEAD(&vmm->list);
+ vmm->free = RB_ROOT;
+ vmm->root = RB_ROOT;
+
+ if (managed) {
+ /* Address-space will be managed by the client for the most
+ * part, except for a specified area where NVKM allocations
+ * are allowed to be placed.
+ */
+ vmm->start = 0;
+ vmm->limit = 1ULL << bits;
+ if (addr + size < addr || addr + size > vmm->limit)
+ return -EINVAL;
+
+ /* Client-managed area before the NVKM-managed area. */
+ if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
+ return ret;
+
+ vmm->managed.p.addr = 0;
+ vmm->managed.p.size = addr;
+
+ /* NVKM-managed area. */
+ if (size) {
+ if (!(vma = nvkm_vma_new(addr, size)))
+ return -ENOMEM;
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add_tail(&vma->head, &vmm->list);
+ }
+
+ /* Client-managed area after the NVKM-managed area. */
+ addr = addr + size;
+ size = vmm->limit - addr;
+ if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
+ return ret;
+
+ vmm->managed.n.addr = addr;
+ vmm->managed.n.size = size;
+ } else {
+ /* Address-space fully managed by NVKM, requiring calls to
+ * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
+ */
+ vmm->start = addr;
+ vmm->limit = size ? (addr + size) : (1ULL << bits);
+ if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+ return -EINVAL;
+
+ if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+ return -ENOMEM;
+
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add(&vma->head, &vmm->list);
+ }
+
+ return 0;
+}
+
+int
+nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 hdr, bool managed, u64 addr, u64 size,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
+}
+
+static struct nvkm_vma *
+nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ u64 addr, u64 size, u8 page, bool map)
+{
+ struct nvkm_vma *prev = NULL;
+ struct nvkm_vma *next = NULL;
+
+ if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
+ if (prev->memory || prev->mapped != map)
+ prev = NULL;
+ }
+
+ if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
+ if (!next->part ||
+ next->memory || next->mapped != map)
+ next = NULL;
+ }
+
+ if (prev || next)
+ return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
+ return nvkm_vmm_node_split(vmm, vma, addr, size);
+}
+
+int
+nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+ struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
+ struct nvkm_vma *next;
+ u64 limit = addr + size;
+ u64 start = addr;
+
+ if (!vma)
+ return -EINVAL;
+
+ do {
+ if (!vma->mapped || vma->memory)
+ continue;
+
+ size = min(limit - start, vma->size - (start - vma->addr));
+
+ nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
+ start, size, false, true);
+
+ next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
+ if (!WARN_ON(!next)) {
+ vma = next;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ vma->mapped = false;
+ }
+ } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
+
+ return 0;
+}
+
+/*TODO:
+ * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
+ * with inside HMM, which would be a lot nicer for us to deal with.
+ * - Support for systems without a 4KiB page size.
+ */
+int
+nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ struct nvkm_vma *vma, *tmp;
+ u64 limit = addr + size;
+ u64 start = addr;
+ int pm = size >> shift;
+ int pi = 0;
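+	/* pm: number of entries in the pfn array; pi: current entry. */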
+
+ /* Only support mapping where the page size of the incoming page
+ * array matches a page size available for direct mapping.
+ */
+ while (page->shift && (page->shift != shift ||
+ page->desc->func->pfn == NULL))
+ page++;
+
+ if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
+ !IS_ALIGNED(size, 1ULL << shift) ||
+ addr + size < addr || addr + size > vmm->limit) {
+ VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
+ shift, page->shift, addr, size);
+ return -EINVAL;
+ }
+
+ if (!(vma = nvkm_vmm_node_search(vmm, addr)))
+ return -ENOENT;
+
+ do {
+ bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
+ bool mapped = vma->mapped;
+ u64 size = limit - start;
+ u64 addr = start;
+ int pn, ret = 0;
+
+ /* Narrow the operation window to cover a single action (page
+ * should be mapped or not) within a single VMA.
+ */
+ for (pn = 0; pi + pn < pm; pn++) {
+ if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
+ break;
+ }
+ size = min_t(u64, size, pn << page->shift);
+ size = min_t(u64, size, vma->size + vma->addr - addr);
+
+ /* Reject any operation to unmanaged regions, and areas that
+ * have nvkm_memory objects mapped in them already.
+ */
+ if (!vma->mapref || vma->memory) {
+ ret = -EINVAL;
+ goto next;
+ }
+
+ /* In order to both properly refcount GPU page tables, and
+ * prevent "normal" mappings and these direct mappings from
+ * interfering with each other, we need to track contiguous
+ * ranges that have been mapped with this interface.
+ *
+ * Here we attempt to either split an existing VMA so we're
+ * able to flag the region as either unmapped/mapped, or to
+ * merge with adjacent VMAs that are already compatible.
+ *
+ * If the region is already compatible, nothing is required.
+ */
+ if (map != mapped) {
+ tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
+ page -
+ vmm->func->page, map);
+ if (WARN_ON(!tmp)) {
+ ret = -ENOMEM;
+ goto next;
+ }
+
+ if ((tmp->mapped = map))
+ tmp->refd = page - vmm->func->page;
+ else
+ tmp->refd = NVKM_VMA_PAGE_NONE;
+ vma = tmp;
+ }
+
+ /* Update HW page tables. */
+ if (map) {
+ struct nvkm_vmm_map args;
+ args.page = page;
+ args.pfn = &pfn[pi];
+
+ if (!mapped) {
+ ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
+ size, &args, page->
+ desc->func->pfn);
+ } else {
+ nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
+ page->desc->func->pfn);
+ }
+ } else {
+ if (mapped) {
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
+ false, true);
+ }
+ }
+
+next:
+ /* Iterate to next operation. */
+ if (vma->addr + vma->size == addr + size)
+ vma = node(vma, next);
+ start += size;
+
+ if (ret) {
+ /* Failure is signalled by clearing the valid bit on
+ * any PFN that couldn't be modified as requested.
+ */
+ while (size) {
+ pfn[pi++] = NVKM_VMM_PFN_NONE;
+ size -= 1 << page->shift;
+ }
+ } else {
+ pi += size >> page->shift;
+ }
+ } while (vma && start < limit);
+
+ return 0;
+}
+
+void
+nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *prev = NULL;
+ struct nvkm_vma *next;
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+ vma->mapped = false;
+
+ if (vma->part && (prev = node(vma, prev)) && prev->mapped)
+ prev = NULL;
+ if ((next = node(vma, next)) && (!next->part || next->mapped))
+ next = NULL;
+ nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
+}
+
+void
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
+
+ if (vma->mapref) {
+ nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ } else {
+ nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
+ }
+
+ nvkm_vmm_unmap_region(vmm, vma);
+}
+
+void
+nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ if (vma->memory) {
+ mutex_lock(&vmm->mutex.vmm);
+ nvkm_vmm_unmap_locked(vmm, vma, false);
+ mutex_unlock(&vmm->mutex.vmm);
+ }
+}
+
+static int
+nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ switch (nvkm_memory_target(map->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
+ VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ case NVKM_MEM_TARGET_NCOH:
+ if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
+ VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
+ !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
+ !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
+ nvkm_memory_page(map->memory) < map->page->shift) {
+ VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
+ vma->addr, (u64)vma->size, map->offset, map->page->shift,
+ nvkm_memory_page(map->memory));
+ return -EINVAL;
+ }
+
+ return vmm->func->valid(vmm, argv, argc, map);
+}
+
+static int
+nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ for (map->page = vmm->func->page; map->page->shift; map->page++) {
+ VMM_DEBUG(vmm, "trying %d", map->page->shift);
+ if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ nvkm_vmm_pte_func func;
+ int ret;
+
+ map->no_comp = vma->no_comp;
+
+ /* Make sure we won't overrun the end of the memory object. */
+ if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
+ VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
+ nvkm_memory_size(map->memory),
+ map->offset, (u64)vma->size);
+ return -EINVAL;
+ }
+
+ /* Check remaining arguments for validity. */
+ if (vma->page == NVKM_VMA_PAGE_NONE &&
+ vma->refd == NVKM_VMA_PAGE_NONE) {
+ /* Find the largest page size we can perform the mapping at. */
+ const u32 debug = vmm->debug;
+ vmm->debug = 0;
+ ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ vmm->debug = debug;
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid at any page size");
+ nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ return -EINVAL;
+ }
+ } else {
+ /* Page size of the VMA is already pre-determined. */
+ if (vma->refd != NVKM_VMA_PAGE_NONE)
+ map->page = &vmm->func->page[vma->refd];
+ else
+ map->page = &vmm->func->page[vma->page];
+
+ ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Deal with the 'offset' argument, and fetch the backend function. */
+ map->off = map->offset;
+ if (map->mem) {
+ for (; map->off; map->mem = map->mem->next) {
+ u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->mem;
+ } else
+ if (map->sgl) {
+ for (; map->off; map->sgl = sg_next(map->sgl)) {
+ u64 size = sg_dma_len(map->sgl);
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->sgl;
+ } else {
+ map->dma += map->offset >> PAGE_SHIFT;
+ map->off = map->offset & PAGE_MASK;
+ func = map->page->desc->func->dma;
+ }
+
+ /* Perform the map. */
+ if (vma->refd == NVKM_VMA_PAGE_NONE) {
+ ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
+ if (ret)
+ return ret;
+
+ vma->refd = map->page - vmm->func->page;
+ } else {
+ nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
+ }
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+ vma->memory = nvkm_memory_ref(map->memory);
+ vma->mapped = true;
+ vma->tags = map->tags;
+ return 0;
+}
+
+int
+nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ int ret;
+
+ if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
+ vmm->managed.raw)
+ return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+ vma->busy = false;
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+static void
+nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *prev, *next;
+
+ if ((prev = node(vma, prev)) && !prev->used) {
+ vma->addr = prev->addr;
+ vma->size += prev->size;
+ nvkm_vmm_free_delete(vmm, prev);
+ }
+
+ if ((next = node(vma, next)) && !next->used) {
+ vma->size += next->size;
+ nvkm_vmm_free_delete(vmm, next);
+ }
+
+ nvkm_vmm_free_insert(vmm, vma);
+}
+
+void
+nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ struct nvkm_vma *next = vma;
+
+ BUG_ON(vma->part);
+
+ if (vma->mapref || !vma->sparse) {
+ do {
+ const bool mem = next->memory != NULL;
+ const bool map = next->mapped;
+ const u8 refd = next->refd;
+ const u64 addr = next->addr;
+ u64 size = next->size;
+
+ /* Merge regions that are in the same state. */
+ while ((next = node(next, next)) && next->part &&
+ (next->mapped == map) &&
+ (next->memory != NULL) == mem &&
+ (next->refd == refd))
+ size += next->size;
+
+ if (map) {
+ /* Region(s) are mapped, merge the unmap
+ * and dereference into a single walk of
+ * the page tree.
+ */
+ nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
+ size, vma->sparse,
+ !mem);
+ } else
+ if (refd != NVKM_VMA_PAGE_NONE) {
+ /* Drop allocation-time PTE references. */
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+ }
+ } while (next && next->part);
+ }
+
+ /* Merge any mapped regions that were split from the initial
+ * address-space allocation back into the allocated VMA, and
+ * release memory/compression resources.
+ */
+ next = vma;
+ do {
+ if (next->mapped)
+ nvkm_vmm_unmap_region(vmm, next);
+ } while ((next = node(vma, next)) && next->part);
+
+ if (vma->sparse && !vma->mapref) {
+ /* Sparse region that was allocated with a fixed page size,
+ * meaning all relevant PTEs were referenced once when the
+ * region was allocated, and remained that way, regardless
+ * of whether memory was mapped into it afterwards.
+ *
+ * The process of unmapping, unsparsing, and dereferencing
+ * PTEs can be done in a single page tree walk.
+ */
+ nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
+ } else
+ if (vma->sparse) {
+ /* Sparse region that wasn't allocated with a fixed page size,
+ * PTE references were taken both at allocation time (to make
+ * the GPU see the region as sparse), and when mapping memory
+ * into the region.
+ *
+ * The latter was handled above, and the remaining references
+ * are dealt with here.
+ */
+ nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
+ }
+
+ /* Remove VMA from the list of allocated nodes. */
+ nvkm_vmm_node_remove(vmm, vma);
+
+ /* Merge VMA back into the free list. */
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ vma->used = false;
+ nvkm_vmm_put_region(vmm, vma);
+}
+
+void
+nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
+{
+ struct nvkm_vma *vma = *pvma;
+ if (vma) {
+ mutex_lock(&vmm->mutex.vmm);
+ nvkm_vmm_put_locked(vmm, vma);
+ mutex_unlock(&vmm->mutex.vmm);
+ *pvma = NULL;
+ }
+}
+
+int
+nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
+ u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
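+	/* Index NVKM_VMA_PAGE_NONE keeps (page - vmm->func->page) consistent
+	 * when no specific page size is requested.
+	 */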
+ struct rb_node *node = NULL, *temp;
+ struct nvkm_vma *vma = NULL, *tmp;
+ u64 addr, tail;
+ int ret;
+
+ VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
+ "shift: %d align: %d size: %016llx",
+ getref, mapref, sparse, shift, align, size);
+
+ /* Zero-sized, or lazily-allocated sparse VMAs, make no sense. */
+ if (unlikely(!size || (!getref && !mapref && sparse))) {
+ VMM_DEBUG(vmm, "args %016llx %d %d %d",
+ size, getref, mapref, sparse);
+ return -EINVAL;
+ }
+
+ /* Tesla-class GPUs can only select page size per-PDE, which means
+ * we're required to know the mapping granularity up-front to find
+ * a suitable region of address-space.
+ *
+	 * The same goes if we're requesting up-front allocation of PTEs.
+ */
+ if (unlikely((getref || vmm->func->page_block) && !shift)) {
+ VMM_DEBUG(vmm, "page size required: %d %016llx",
+ getref, vmm->func->page_block);
+ return -EINVAL;
+ }
+
+ /* If a specific page size was requested, determine its index and
+ * make sure the requested size is a multiple of the page size.
+ */
+ if (shift) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ align = max_t(u8, align, shift);
+ } else {
+ align = max_t(u8, align, 12);
+ }
+
+ /* Locate smallest block that can possibly satisfy the allocation. */
+ temp = vmm->free.rb_node;
+ while (temp) {
+ struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
+ if (this->size < size) {
+ temp = temp->rb_right;
+ } else {
+ node = temp;
+ temp = temp->rb_left;
+ }
+ }
+
+ if (unlikely(!node))
+ return -ENOSPC;
+
+ /* Take into account alignment restrictions, trying larger blocks
+ * in turn until we find a suitable free block.
+ */
+ do {
+ struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
+ struct nvkm_vma *prev = node(this, prev);
+ struct nvkm_vma *next = node(this, next);
+ const int p = page - vmm->func->page;
+
+ addr = this->addr;
+ if (vmm->func->page_block && prev && prev->page != p)
+ addr = ALIGN(addr, vmm->func->page_block);
+ addr = ALIGN(addr, 1ULL << align);
+
+ tail = this->addr + this->size;
+ if (vmm->func->page_block && next && next->page != p)
+ tail = ALIGN_DOWN(tail, vmm->func->page_block);
+
+ if (addr <= tail && tail - addr >= size) {
+ nvkm_vmm_free_remove(vmm, this);
+ vma = this;
+ break;
+ }
+ } while ((node = rb_next(node)));
+
+ if (unlikely(!vma))
+ return -ENOSPC;
+
+ /* If the VMA we found isn't already exactly the requested size,
+ * it needs to be split, and the remaining free blocks returned.
+ */
+ if (addr != vma->addr) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, vma);
+ vma = tmp;
+ }
+
+ if (size != vma->size) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, tmp);
+ }
+
+ /* Pre-allocate page tables and/or setup sparse mappings. */
+ if (sparse && getref)
+ ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
+ else if (sparse)
+ ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
+ else if (getref)
+ ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
+ else
+ ret = 0;
+ if (ret) {
+ nvkm_vmm_put_region(vmm, vma);
+ return ret;
+ }
+
+ vma->mapref = mapref && !getref;
+ vma->sparse = sparse;
+ vma->page = page - vmm->func->page;
+ vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
+ vma->used = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ *pvma = vma;
+ return 0;
+}
+
+int
+nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
+{
+ int ret;
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+void
+nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[refd];
+
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
+}
+
+void
+nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ if (unlikely(!size))
+ return -EINVAL;
+
+ return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ int ret;
+
+ mutex_lock(&vmm->mutex.ref);
+ ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
+ mutex_unlock(&vmm->mutex.ref);
+
+ return ret;
+}
+
+void
+nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ if (inst && vmm && vmm->func->part) {
+ mutex_lock(&vmm->mutex.vmm);
+ vmm->func->part(vmm, inst);
+ mutex_unlock(&vmm->mutex.vmm);
+ }
+}
+
+int
+nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ int ret = 0;
+ if (vmm->func->join) {
+ mutex_lock(&vmm->mutex.vmm);
+ ret = vmm->func->join(vmm, inst);
+ mutex_unlock(&vmm->mutex.vmm);
+ }
+ return ret;
+}
+
+static bool
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
+ return false;
+}
+
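+/* Reference page tables covering the entire address-space and boot their
+ * backing memory, allowing them to be used early (e.g. during BAR bring-up).
+ */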
+int
+nvkm_vmm_boot(struct nvkm_vmm *vmm)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+ int ret;
+
+ while (page[1].shift)
+ page++;
+
+ ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
+ nvkm_vmm_boot_ptes, NULL, NULL, NULL);
+ vmm->bootstrapped = true;
+ return 0;
+}
+
+static void
+nvkm_vmm_del(struct kref *kref)
+{
+ struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
+ nvkm_vmm_dtor(vmm);
+ kfree(vmm);
+}
+
+void
+nvkm_vmm_unref(struct nvkm_vmm **pvmm)
+{
+ struct nvkm_vmm *vmm = *pvmm;
+ if (vmm) {
+ kref_put(&vmm->kref, nvkm_vmm_del);
+ *pvmm = NULL;
+ }
+}
+
+struct nvkm_vmm *
+nvkm_vmm_ref(struct nvkm_vmm *vmm)
+{
+ if (vmm)
+ kref_get(&vmm->kref);
+ return vmm;
+}
+
+int
+nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
+ u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_vmm *vmm = NULL;
+ int ret;
+ ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
+ key, name, &vmm);
+ if (ret)
+ nvkm_vmm_unref(&vmm);
+ *pvmm = vmm;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
new file mode 100644
index 0000000000..f9bc30cdb2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -0,0 +1,380 @@
+#ifndef __NVKM_VMM_H__
+#define __NVKM_VMM_H__
+#include "priv.h"
+#include <core/memory.h>
+enum nvkm_memory_target;
+
+struct nvkm_vmm_pt {
+	/* Some GPUs have a mapping level with dual page tables to
+	 * support large and small pages in the same address-range.
+	 *
+	 * We track the state of both page tables in one place, which
+	 * is why there are multiple PT pointers/refcounts here.
+ */
+ struct nvkm_mmu_pt *pt[2];
+ u32 refs[2];
+
+ /* Page size handled by this PT.
+ *
+	 * Tesla backend needs to know this when writing PDEs,
+ * otherwise unnecessary.
+ */
+ u8 page;
+
+ /* Entire page table sparse.
+ *
+ * Used to propagate sparseness to child page tables.
+ */
+ bool sparse:1;
+
+ /* Tracking for page directories.
+ *
+ * The array is indexed by PDE, and will either point to the
+ * child page table, or indicate the PDE is marked as sparse.
+	 */
+#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
+#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
+#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
+ struct nvkm_vmm_pt **pde;
+
+ /* Tracking for dual page tables.
+ *
+ * There's one entry for each LPTE, keeping track of whether
+ * there are valid SPTEs in the same address-range.
+ *
+ * This information is used to manage LPTE state transitions.
+ */
+#define NVKM_VMM_PTE_SPARSE 0x80
+#define NVKM_VMM_PTE_VALID 0x40
+#define NVKM_VMM_PTE_SPTES 0x3f
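+	/* Low bits count valid SPTEs in the LPTE's range; VALID/SPARSE
+	 * flag the state of the LPTE itself.
+	 */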
+ u8 pte[];
+};
+
+typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
+ struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
+ struct nvkm_vmm_pt *, u32 pdei);
+typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *);
+
+struct nvkm_vmm_desc_func {
+ nvkm_vmm_pxe_func invalid;
+ nvkm_vmm_pxe_func unmap;
+ nvkm_vmm_pxe_func sparse;
+
+ nvkm_vmm_pde_func pde;
+
+ nvkm_vmm_pte_func mem;
+ nvkm_vmm_pte_func dma;
+ nvkm_vmm_pte_func sgl;
+
+ nvkm_vmm_pte_func pfn;
+ bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+ nvkm_vmm_pxe_func pfn_unmap;
+};
+
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
+void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
+void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+
+void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+
+struct nvkm_vmm_desc {
+ enum {
+ PGD,
+ PGT,
+ SPT,
+ LPT,
+ } type;
+ u8 bits; /* VMA bits covered by PT. */
+ u8 size; /* Bytes-per-PTE. */
+ u32 align; /* PT address alignment. */
+ const struct nvkm_vmm_desc_func *func;
+};
+
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
+extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];
+
+struct nvkm_vmm_page {
+ u8 shift;
+ const struct nvkm_vmm_desc *desc;
+#define NVKM_VMM_PAGE_SPARSE 0x01
+#define NVKM_VMM_PAGE_VRAM 0x02
+#define NVKM_VMM_PAGE_HOST 0x04
+#define NVKM_VMM_PAGE_COMP 0x08
+#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
+#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
+ u8 type;
+};
+
+struct nvkm_vmm_func {
+ int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
+ void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
+
+ int (*aper)(enum nvkm_memory_target);
+ int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
+ struct nvkm_vmm_map *);
+ void (*flush)(struct nvkm_vmm *, int depth);
+
+ int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
+ u32 mthd, void *argv, u32 argc);
+
+ void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);
+
+ u64 page_block;
+ const struct nvkm_vmm_page page[];
+};
+
+struct nvkm_vmm_join {
+ struct nvkm_memory *inst;
+ struct list_head head;
+};
+
+int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ struct lock_class_key *, const char *name,
+ struct nvkm_vmm **);
+struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
+struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+ u64 addr, u64 size);
+int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
+ bool sparse, u8 page, u8 align, u64 size,
+ struct nvkm_vma **pvma);
+void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
+
+int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd);
+int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
+
+static inline bool
+nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
+{
+ u64 p_start = vmm->managed.p.addr;
+ u64 p_end = p_start + vmm->managed.p.size;
+ u64 n_start = vmm->managed.n.addr;
+ u64 n_end = n_start + vmm->managed.n.size;
+ u64 end = start + size;
+
+ if (start >= p_start && end <= p_end)
+ return true;
+
+ if (start >= n_start && end <= n_end)
+ return true;
+
+ return false;
+}
+
+#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
+#define NVKM_VMM_PFN_ADDR_SHIFT 12
+#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
+#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
+#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
+#define NVKM_VMM_PFN_A 0x0000000000000004ULL
+#define NVKM_VMM_PFN_W 0x0000000000000002ULL
+#define NVKM_VMM_PFN_V 0x0000000000000001ULL
+#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
+
+int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
+int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);
+
+struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
+
+int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
+ bool, u64, u64, void *, u32, struct lock_class_key *,
+ const char *, struct nvkm_vmm **);
+int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
+int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int gf100_vmm_aper(enum nvkm_memory_target);
+int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gf100_vmm_flush(struct nvkm_vmm *, int);
+void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
+void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
+
+int gk20a_vmm_aper(enum nvkm_memory_target);
+
+int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
+int gp100_vmm_new_(const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gp100_vmm_flush(struct nvkm_vmm *, int);
+int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
+void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
+
+int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
+int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+
+#define VMM_PRINT(l,v,p,f,a...) do { \
+ struct nvkm_vmm *_vmm = (v); \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
+ nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
+ _vmm->name, ##a); \
+ } \
+} while(0)
+#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
+#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
+#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)
+
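+/* Walk the current physical segment (mem/dma/sgl), writing runs of PTEs
+ * that never cross a segment boundary; NEXT advances to the next segment.
+ */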
+#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
+ nvkm_kmap((PT)->memory); \
+ while (PTEN) { \
+ u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
+ u64 _addr = ((BASE) + MAP->off); \
+ \
+ if (_ptes > PTEN) { \
+ MAP->off += PTEN << MAP->page->shift; \
+ _ptes = PTEN; \
+ } else { \
+ MAP->off = 0; \
+ NEXT; \
+ } \
+ \
+ VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
+ \
+ FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
+ PTEI += _ptes; \
+ PTEN -= _ptes; \
+ } \
+ nvkm_done((PT)->memory); \
+} while(0)
+
+#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
+ ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
+ (MAP->mem = MAP->mem->next))
+#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ *MAP->dma, PAGE_SIZE, MAP->dma++)
+#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
+ (MAP->sgl = sg_next(MAP->sgl)))
+
+#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
+#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
+#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
+ const u32 _pteo = (o); u##b _data = (d); \
+ VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
+ VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
+} while(0)
+
+#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
+#define VMM_FO032(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))
+
+#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
+#define VMM_FO064(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
+
+#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
+ u32 _pteo = (o), _ptes = (c); \
+ const u64 _addr = (m)->addr + _pteo; \
+ VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
+ while (_ptes--) { \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
+ _pteo += 0x10; \
+ } \
+} while(0)
+
+#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
+#define VMM_FO128(m,v,o,lo,hi,c) do { \
+ nvkm_kmap((m)->memory); \
+ VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
+ nvkm_done((m)->memory); \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
new file mode 100644
index 0000000000..5e857c02e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/timer.h>
+
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+static inline void
+gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
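+	/* PTEs hold the physical address shifted right 8 bits, OR'd with the
+	 * type bits; the first branch also writes per-PTE compression tags.
+	 */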
+ u64 base = (addr >> 8) | map->type;
+ u64 data = base;
+
+ if (map->ctag && !(map->next & (1ULL << 44))) {
+ while (ptes--) {
+ data = base | ((map->ctag >> 1) << 44);
+ if (!(map->ctag++ & 1))
+ data |= BIT_ULL(60);
+
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ base += map->next;
+ }
+ } else {
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+ }
+}
+
+void
+gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 8) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+void
+gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0;
+
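+	/* Pack both page tables (aperture + address) into a single PDE;
+	 * pt[0] is the large-page PT, pt[1] the small-page PT.
+	 */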
+ if ((pt = pgt->pt[0])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
+ data |= BIT_ULL(35); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr >> 8;
+ }
+
+ if ((pt = pgt->pt[1])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
+ data |= BIT_ULL(34); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr << 24;
+ }
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+void
+gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100cb8, addr);
+}
+
+void
+gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+ u64 addr = 0;
+
+ mutex_lock(&vmm->mmu->mutex);
+ /* Looks like maybe a "free flush slots" counter, the
+	 * faster you write to 0x100cbc the more it decreases.
+ */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+ break;
+ );
+
+ if (!(type & 0x00000002) /* ALL_PDB. */) {
+ switch (nvkm_memory_target(pd->memory)) {
+ case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
+ case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
+ case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ addr |= (vmm->pd->pt[0]->addr >> 12) << 4;
+
+ vmm->func->invalidate_pdb(vmm, addr);
+ }
+
+ nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
+
+ /* Wait for flush to be queued? */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
+ mutex_unlock(&vmm->mmu->mutex);
+}
+
+void
+gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ u32 type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+ gf100_vmm_invalidate(vmm, type);
+}
+
+int
+gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ const bool gm20x = page->desc->func->sparse != NULL;
+ union {
+ struct gf100_vmm_map_vn vn;
+ struct gf100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, kind_inv, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1 << page->shift) >> 8;
+ map->type = map->ctag = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
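+	/* kindm[] maps compressible kinds onto their uncompressed fallback;
+	 * an entry differing from its index needs comptag backing.  One tag
+	 * appears to cover 1 << comp bytes: 64KiB for pre-GM20x 64KiB
+	 * pages, 128KiB everywhere else.
+	 */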
+ if (kindm[kind] != kind) {
+ u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
+ u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+ }
+
+ if (!map->no_comp && map->tags->mn) {
+ u64 tags = map->tags->mn->offset + (map->offset >> 17);
+ if (page->shift == 17 || !gm20x) {
+ map->type |= tags << 44;
+ map->ctag |= 1ULL << 44;
+ map->next |= 1ULL << 44;
+ } else {
+ map->ctag |= tags << 1 | 1;
+ }
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
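+	/* The flag word assembled below is later ORed with (addr >> 8), so
+	 * the final GF100 PTE appears to be: bit 0 VALID, bit 1 PRIV,
+	 * bit 2 RO, bit 32 VOL, bits 33-34 aperture, bits 36+ kind,
+	 * bits 44+ comptag, with the page address in the remaining low
+	 * bits.
+	 */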
+ map->type |= BIT(0);
+ map->type |= (u64)priv << 1;
+ map->type |= (u64) ro << 2;
+ map->type |= (u64) vol << 32;
+ map->type |= (u64)aper << 33;
+ map->type |= (u64)kind << 36;
+ return 0;
+}
+
+int
+gf100_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_VRAM: return 0;
+ case NVKM_MEM_TARGET_HOST: return 2;
+ case NVKM_MEM_TARGET_NCOH: return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+void
+gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ nvkm_fo64(inst, 0x0200, 0x00000000, 2);
+}
+
+int
+gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+
+ switch (nvkm_memory_target(pd->memory)) {
+ case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
+ base |= BIT_ULL(2) /* VOL. */;
+ break;
+ case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ base |= pd->addr;
+
+ nvkm_kmap(inst);
+ nvkm_wo64(inst, 0x0200, base);
+ nvkm_wo64(inst, 0x0208, vmm->limit - 1);
+ nvkm_done(inst);
+ return 0;
+}
+
+int
+gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gf100_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gf100_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gf100_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ switch (mmu->subdev.device->fb->page) {
+ case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
+ argv, argc, key, name, pvmm);
+ case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
+ argv, argc, key, name, pvmm);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+int
+gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
new file mode 100644
index 0000000000..0b59c01fd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+void
+gk104_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gk104_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
new file mode 100644
index 0000000000..5a9582dce9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <core/memory.h>
+
+int
+gk20a_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_NCOH: return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct nvkm_vmm_func
+gk20a_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk20a_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+int
+gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
new file mode 100644
index 0000000000..2e61af02d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/ifb00d.h>
+#include <nvif/unpack.h>
+
+static void
+gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+static void
+gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgd_sparse,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
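+/* Bit 11 of the instance-block PD base appears to select 64KiB big pages,
+ * the GM20x default otherwise being 128KiB.
+ */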
+int
+gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ if (vmm->func->page[1].shift == 16)
+ base |= BIT_ULL(11);
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+int
+gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gm200_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gm200_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm200_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ const struct nvkm_vmm_func *func;
+ union {
+ struct gm200_vmm_vn vn;
+ struct gm200_vmm_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ switch (args->v0.bigpage) {
+ case 16: func = func_16; break;
+ case 17: func = func_17; break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ func = func_17;
+ } else
+ return ret;
+
+ return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+}
+
+int
+gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
new file mode 100644
index 0000000000..96b759695d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gm20b_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm20b_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
new file mode 100644
index 0000000000..f3630d0e0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <core/client.h>
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
+static void
+gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+ if ((data & (3ULL << 1)) != 0) {
+ addr = (data >> 8) << 12;
+ dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ bool dma = false;
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+ if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+ VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
+ dma = true;
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+ return dma;
+}
+
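+/* HMM/PFN mapping path: each map->pfn entry appears to carry flag bits
+ * (valid/writeable/atomic/VRAM) in its low bits and the page-frame number
+ * above NVKM_VMM_PFN_ADDR_SHIFT.  System pages are DMA-mapped on the fly
+ * and written as volatile, coherent system memory; VRAM pages are mapped
+ * directly.
+ */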
+static void
+gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ for (; ptes; ptes--, map->pfn++) {
+ u64 data = 0;
+
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
+ if (!(*map->pfn & NVKM_VMM_PFN_W))
+ data |= BIT_ULL(6); /* RO. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+ addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+ addr = dma_map_page(dev, pfn_to_page(addr), 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (!WARN_ON(dma_mapping_error(dev, addr))) {
+ data |= addr >> 4;
+ data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+ data |= BIT_ULL(3); /* VOL. */
+ data |= BIT_ULL(0); /* VALID. */
+ }
+ } else {
+ data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+ data |= BIT_ULL(0); /* VALID. */
+ }
+
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ }
+ nvkm_done(pt->memory);
+}
+
+static inline void
+gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 4) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+ .dma = gp100_vmm_pgt_dma,
+ .sgl = gp100_vmm_pgt_sgl,
+ .pfn = gp100_vmm_pgt_pfn,
+ .pfn_clear = gp100_vmm_pfn_clear,
+ .pfn_unmap = gp100_vmm_pfn_unmap,
+};
+
+static void
+gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_lpt = {
+ .invalid = gp100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+};
+
+static inline void
+gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
+}
+
+static inline bool
+gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
+ case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
+ *data |= BIT_ULL(3); /* VOL. */
+ break;
+ case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ *data |= pt->addr >> 4;
+ return true;
+}
+
+static void
+gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
+}
+
+static void
+gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
+}
+
+static void
+gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+
+ if ((data & (3ULL << 1)) != 0) {
+ addr = (data >> 8) << 12;
+ dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ bool dma = false;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+
+ if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+ VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
+ dma = true;
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+ return dma;
+}
+
+static void
+gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ for (; ptes; ptes--, map->pfn++) {
+ u64 data = 0;
+
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
+ if (!(*map->pfn & NVKM_VMM_PFN_W))
+ data |= BIT_ULL(6); /* RO. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+ addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+ addr = dma_map_page(dev, pfn_to_page(addr), 0,
+ 1UL << 21, DMA_BIDIRECTIONAL);
+ if (!WARN_ON(dma_mapping_error(dev, addr))) {
+ data |= addr >> 4;
+ data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+ data |= BIT_ULL(3); /* VOL. */
+ data |= BIT_ULL(0); /* VALID. */
+ }
+ } else {
+ data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+ data |= BIT_ULL(0); /* VALID. */
+ }
+
+ VMM_WO064(pt, vmm, ptei++ * 16, data);
+ }
+ nvkm_done(pt->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd0 = {
+ .unmap = gp100_vmm_pd0_unmap,
+ .sparse = gp100_vmm_pd0_sparse,
+ .pde = gp100_vmm_pd0_pde,
+ .mem = gp100_vmm_pd0_mem,
+ .pfn = gp100_vmm_pd0_pfn,
+ .pfn_clear = gp100_vmm_pd0_pfn_clear,
+ .pfn_unmap = gp100_vmm_pd0_pfn_unmap,
+};
+
+static void
+gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gp100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .pde = gp100_vmm_pd1_pde,
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gp100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gp100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
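+
+/* A sketch of the five-level Pascal walk these descriptors appear to
+ * encode: 12 + 9 (SPT) + 8 (PD0, 16-byte dual PDEs) + 9 + 9 + 2 = 49-bit
+ * VA; the 64KiB variant swaps the SPT for a 5-bit LPT (16 + 5 = 21).
+ */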
+
+int
+gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct gp100_vmm_map_vn vn;
+ struct gp100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, kind_inv, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1ULL << page->shift) >> 4;
+ map->type = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (kindm[kind] != kind) {
+ u64 tags = nvkm_memory_size(memory) >> 16;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+ }
+
+ if (!map->no_comp && map->tags->mn) {
+ tags = map->tags->mn->offset + (map->offset >> 16);
+ map->ctag |= ((1ULL << page->shift) >> 16) << 36;
+ map->type |= tags << 36;
+ map->next |= map->ctag;
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
+ map->type |= BIT(0);
+ map->type |= (u64)aper << 1;
+ map->type |= (u64) vol << 3;
+ map->type |= (u64)priv << 5;
+ map->type |= (u64) ro << 6;
+ map->type |= (u64)kind << 56;
+ return 0;
+}
+
+static int
+gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ union {
+ struct gp100_vmm_fault_cancel_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u32 aper;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
+ return ret;
+
+ /* Translate MaxwellFaultBufferA instance pointer to the same
+ * format as the NV_GR_FECS_CURRENT_CTX register.
+ */
+ aper = (args->v0.inst >> 8) & 3;
+ args->v0.inst >>= 12;
+ args->v0.inst |= aper << 28;
+ args->v0.inst |= 0x80000000;
+
+ if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
+ if (nvkm_gr_ctxsw_inst(device) == args->v0.inst) {
+ gf100_vmm_invalidate(vmm, 0x0000001b
+ /* CANCEL_TARGETED. */ |
+ (args->v0.hub << 20) |
+ (args->v0.gpc << 15) |
+ (args->v0.client << 9));
+ }
+ WARN_ON(nvkm_gr_ctxsw_resume(device));
+ }
+
+ return 0;
+}
+
+static int
+gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+ union {
+ struct gp100_vmm_fault_replay_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
+ }
+
+ return ret;
+}
+
+int
+gp100_vmm_mthd(struct nvkm_vmm *vmm,
+ struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
+{
+ switch (mthd) {
+ case GP100_VMM_VN_FAULT_REPLAY:
+ return gp100_vmm_fault_replay(vmm, argv, argc);
+ case GP100_VMM_VN_FAULT_CANCEL:
+ return gp100_vmm_fault_cancel(vmm, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+void
+gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
+ nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
+}
+
+void
+gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+ type |= 0x00000001; /* PAGE_ALL */
+ gf100_vmm_invalidate(vmm, type);
+}
+
+int
+gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
+ if (vmm->replay) {
+ base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
+ base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
+ }
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+static const struct nvkm_vmm_func
+gp100_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gp100_vmm_new_(const struct nvkm_vmm_func *func,
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ union {
+ struct gp100_vmm_vn vn;
+ struct gp100_vmm_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool replay;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ replay = args->v0.fault_replay != 0;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ replay = false;
+ } else
+ return ret;
+
+ ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+ if (ret)
+ return ret;
+
+ (*pvmm)->replay = replay;
+ return 0;
+}
+
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
new file mode 100644
index 0000000000..e081239afe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gp10b_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
new file mode 100644
index 0000000000..f0e21f6325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
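+/* Volta instance blocks gain a 64-entry sub-VMM PDB table at 0x2a0.  The
+ * join below appears to copy the base PDB (written by gp100_vmm_join())
+ * into every slot enabled in the mask, here only slot 0, and to write an
+ * invalid pattern into the rest.
+ */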
+int
+gv100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ u64 data[2], mask;
+ int ret = gp100_vmm_join(vmm, inst), i;
+ if (ret)
+ return ret;
+
+ nvkm_kmap(inst);
+ data[0] = nvkm_ro32(inst, 0x200);
+ data[1] = nvkm_ro32(inst, 0x204);
+ mask = BIT_ULL(0);
+
+ nvkm_wo32(inst, 0x21c, 0x00000000);
+
+ for (i = 0; i < 64; i++) {
+ if (mask & BIT_ULL(i)) {
+ nvkm_wo32(inst, 0x2a4 + (i * 0x10), data[1]);
+ nvkm_wo32(inst, 0x2a0 + (i * 0x10), data[0]);
+ } else {
+ nvkm_wo32(inst, 0x2a4 + (i * 0x10), 0x00000001);
+ nvkm_wo32(inst, 0x2a0 + (i * 0x10), 0x00000001);
+ }
+ nvkm_wo32(inst, 0x2a8 + (i * 0x10), 0x00000000);
+ }
+
+ nvkm_wo32(inst, 0x298, lower_32_bits(mask));
+ nvkm_wo32(inst, 0x29c, upper_32_bits(mask));
+ nvkm_done(inst);
+ return 0;
+}
+
+static const struct nvkm_vmm_func
+gv100_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 0000000000..bdddd99f58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
new file mode 100644
index 0000000000..4c6b3b7d22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/if000d.h>
+#include <nvif/unpack.h>
+
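+/* NV04 page tables sit directly behind an 8-byte header (written by
+ * nv04_vmm_new() below), hence the "8 +" offset on every PTE access.
+ */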
+static inline void
+nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = addr | 0x00000003; /* PRESENT, RW. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
+ data += 0x00001000;
+ }
+}
+
+static void
+nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+}
+
+static void
+nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--)
+ VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv04_vmm_desc_pgt = {
+ .unmap = nv04_vmm_pgt_unmap,
+ .dma = nv04_vmm_pgt_dma,
+ .sgl = nv04_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv04_vmm_desc_12[] = {
+ { PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
+ {}
+};
+
+int
+nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ union {
+ struct nv04_vmm_map_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ VMM_DEBUG(vmm, "args");
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv04_vmm = {
+ .valid = nv04_vmm_valid,
+ .page = {
+ { 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ union {
+ struct nv04_vmm_vn vn;
+ } *args = argv;
+ int ret;
+
+ ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
+ key, name, pvmm);
+ if (ret)
+ return ret;
+
+ return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
+}
+
+int
+nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_memory *mem;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ mem = vmm->pd->pt[0]->memory;
+ nvkm_kmap(mem);
+ nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+ nvkm_wo32(mem, 0x00004, vmm->limit - 1);
+ nvkm_done(mem);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
new file mode 100644
index 0000000000..31984671da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
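+/* NV41 PTEs are 32-bit and appear to hold the page address shifted right
+ * by 7, so each 4KiB page advances the field by 0x20; bit 0 marks the
+ * entry valid.
+ */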
+static void
+nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = (addr >> 7) | 0x00000001; /* VALID. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ data += 0x00000020;
+ }
+}
+
+static void
+nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+}
+
+static void
+nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u32 data = (*map->dma++ >> 7) | 0x00000001;
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv41_vmm_desc_pgt = {
+ .unmap = nv41_vmm_pgt_unmap,
+ .dma = nv41_vmm_pgt_dma,
+ .sgl = nv41_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv41_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
+ {}
+};
+
+static void
+nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+
+ mutex_lock(&vmm->mmu->mutex);
+ nvkm_wr32(device, 0x100810, 0x00000022);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100810) & 0x00000020)
+ break;
+ );
+ nvkm_wr32(device, 0x100810, 0x00000000);
+ mutex_unlock(&vmm->mmu->mutex);
+}
+
+static const struct nvkm_vmm_func
+nv41_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv41_vmm_flush,
+ .page = {
+ { 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
new file mode 100644
index 0000000000..a82936ba98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
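+/* NV44 packs four 27-bit page addresses (addr >> 12) into four 32-bit
+ * words, so individual PTEs straddle word boundaries.  fill() below does
+ * a read-modify-write of a whole 16-byte group, and bit 30 of the last
+ * word appears to latch the group; slots with no mapping point at the
+ * per-VMM dummy page (vmm->null) instead of being left empty.
+ */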
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ dma_addr_t *list, u32 ptei, u32 ptes)
+{
+ u32 pteo = (ptei << 2) & ~0x0000000f;
+ u32 tmp[4];
+
+ tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+ tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+ tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+ tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+ while (ptes--) {
+ u32 addr = (list ? *list++ : vmm->null) >> 12;
+ switch (ptei++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
+ }
+ }
+
+ VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+ VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+ VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+ VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ dma_addr_t tmp[4], i;
+
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ for (i = 0; i < pten; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes >= 4) {
+ for (i = 0; i < 4; i++, addr += 0x1000)
+ tmp[i] = addr >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ for (i = 0; i < ptes; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+ }
+}
+
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ map->dma += pten;
+ }
+
+ while (ptes >= 4) {
+ u32 tmp[4], i;
+ for (i = 0; i < 4; i++)
+ tmp[i] = *map->dma++ >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+ map->dma += ptes;
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes > 4) {
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ ptes -= 4;
+ }
+
+ if (ptes)
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+ nvkm_done(pt->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+nv44_vmm_desc_pgt = {
+ .unmap = nv44_vmm_pgt_unmap,
+ .dma = nv44_vmm_pgt_dma,
+ .sgl = nv44_vmm_pgt_sgl,
+};
+
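+/* A single-level page table: 2^17 four-byte PTEs (0x80000 bytes),
+ * covering 2^17 * 4KiB = 512MiB of GART address space.
+ */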
+static const struct nvkm_vmm_desc
+nv44_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
+ {}
+};
+
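+/* Flush sequence (register semantics inferred from the code): write the
+ * address of the last valid page to 0x100814, kick the engine with bit 5
+ * of 0x100808, poll until bit 0 reads back set, then clear the register.
+ */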
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+ nvkm_wr32(device, 0x100808, 0x00000020);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
+ nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
+static const struct nvkm_vmm_func
+nv44_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv44_vmm_flush,
+ .page = {
+ { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
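+/* Note: the 16KiB coherent buffer allocated here provides the dummy page
+ * that unmapped PTEs are pointed at (see nv44_vmm_pgt_fill()).  Failure
+ * is only warned about; vmm->null then stays zero and unmapped entries
+ * fall back to address zero.
+ */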
+int
+nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_subdev *subdev = &mmu->subdev;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
+ &vmm->null, GFP_KERNEL);
+ if (!vmm->nullp) {
+ nvkm_warn(subdev, "unable to allocate dummy pages\n");
+ vmm->null = 0;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
new file mode 100644
index 0000000000..ff08ad5005
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
+
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
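+/* NV50 PTEs encode, in bits 9:7, the log2 size of the contiguous
+ * physical block they belong to (up to 128 pages).  The loop below picks
+ * the largest naturally-aligned power-of-two run still available and
+ * stamps the same PTE value (block base address plus flags) across it.
+ */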
+static inline void
+nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 next = addr + map->type, data;
+ u32 pten;
+ int log2blk;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes) {
+ for (log2blk = 7; log2blk >= 0; log2blk--) {
+ pten = 1 << log2blk;
+ if (ptes >= pten && IS_ALIGNED(ptei, pten))
+ break;
+ }
+
+ data = next | (log2blk << 7);
+ next += pten * map->next;
+ ptes -= pten;
+
+ while (pten--)
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ }
+}
+
+static void
+nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ + map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgt = {
+ .unmap = nv50_vmm_pgt_unmap,
+ .mem = nv50_vmm_pgt_mem,
+ .dma = nv50_vmm_pgt_dma,
+ .sgl = nv50_vmm_pgt_sgl,
+};
+
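+/* Build a PDE value: bits 1:0 mark the table present and select the page
+ * size (0x1 = 64KiB pages, 0x3 = 4KiB pages), bits 6:5 encode how much
+ * of a full 4KiB-page table was actually allocated, bits 3:2 the
+ * aperture holding the table, and the rest its address.  Absent tables
+ * get an easily-recognised poison value.
+ */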
+static bool
+nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
+{
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0xdeadcafe00000000ULL;
+ if (pgt && (pt = pgt->pt[0])) {
+ switch (pgt->page) {
+ case 16: data = 0x00000001; break;
+ case 12: data = 0x00000003;
+ switch (nvkm_memory_size(pt->memory)) {
+ case 0x100000: data |= 0x00000000; break;
+ case 0x040000: data |= 0x00000020; break;
+ case 0x020000: data |= 0x00000040; break;
+ case 0x010000: data |= 0x00000060; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
+ case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
+ case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ data |= pt->addr;
+ }
+ *pdata = data;
+ return true;
+}
+
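+/* On NV50 the page directory is embedded in each channel's instance
+ * block rather than held in one central place, so a PDE update must be
+ * replayed into every instance block currently joined to this VMM.
+ */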
+static void
+nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_join *join;
+ u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
+ u64 data;
+
+ if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
+ return;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ nvkm_kmap(join->inst);
+ nvkm_wo64(join->inst, pdeo, data);
+ nvkm_done(join->inst);
+ }
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgd = {
+ .pde = nv50_vmm_pgd_pde,
+};
+
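+/* Each PDE spans 512MiB of virtual address space (page_block below is
+ * 1 << 29): 2^17 8-byte PTEs for 4KiB pages, 2^13 for 64KiB pages, with
+ * a 2^11-entry PGD on top for a 1TiB address space.
+ */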
+const struct nvkm_vmm_desc
+nv50_vmm_desc_12[] = {
+ { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+nv50_vmm_desc_16[] = {
+ { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
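+/* TLBs are invalidated per engine on NV50: write (engine id << 16) | 1
+ * to 0x100c80 and poll until bit 0 clears.  GR is flushed through the
+ * graphics engine itself where possible, to dodge the hardware bug noted
+ * below.
+ */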
+void
+nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ int i, id;
+
+ mutex_lock(&vmm->mmu->mutex);
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+ if (!atomic_read(&vmm->engref[i]))
+ continue;
+
+ /* unfortunate hw bug workaround... */
+ if (i == NVKM_ENGINE_GR && device->gr) {
+ int ret = nvkm_gr_tlb_flush(device->gr);
+ if (ret != -ENODEV)
+ continue;
+ }
+
+ switch (i) {
+ case NVKM_ENGINE_GR : id = 0x00; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: id = 0x01; break;
+ case NVKM_SUBDEV_BAR : id = 0x06; break;
+ case NVKM_ENGINE_MSPPP :
+ case NVKM_ENGINE_MPEG : id = 0x08; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : id = 0x09; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : id = 0x0a; break;
+ case NVKM_ENGINE_CE : id = 0x0d; break;
+ default:
+ continue;
+ }
+
+ nvkm_wr32(device, 0x100c80, (id << 16) | 1);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
+ }
+ mutex_unlock(&vmm->mmu->mutex);
+}
+
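+/* Assemble the 64-bit PTE template for this mapping.  As set at the
+ * bottom of the function: bit 0 = valid, bit 3 = read-only, bits 5:4 =
+ * aperture, bit 6 = privileged, bits 46:40 = storage kind, bits 48:47 =
+ * compression mode, bits 49 and up the compression tag.
+ */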
+int
+nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct nv50_vmm_map_vn vn;
+ struct nv50_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_ram *ram = device->fb->ram;
+ struct nvkm_memory *memory = map->memory;
+ u8 aper, kind, kind_inv, comp, priv, ro;
+ int kindn, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->type = map->ctag = 0;
+ map->next = 1 << page->shift;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind & 0x7f;
+ comp = args->v0.comp & 0x03;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ comp = 0;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (ram->stolen) {
+ map->type |= ram->stolen;
+ aper = 3;
+ } else {
+ aper = 0;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ aper = 2;
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ aper = 3;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (map->mem && map->mem->type != kindm[kind]) {
+ VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
+ kindm[kind], map->mem->type);
+ return -EINVAL;
+ }
+
+ if (comp) {
+ u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ u32 tags = map->tags->mn->offset +
+ (map->offset >> 16);
+ map->ctag |= (u64)comp << 49;
+ map->type |= (u64)comp << 47;
+ map->type |= (u64)tags << 49;
+ map->next |= map->ctag;
+ }
+ }
+ }
+
+ map->type |= BIT(0); /* Valid. */
+ map->type |= (u64)ro << 3;
+ map->type |= (u64)aper << 4;
+ map->type |= (u64)priv << 6;
+ map->type |= (u64)kind << 40;
+ return 0;
+}
+
+void
+nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ struct nvkm_vmm_join *join;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ if (join->inst == inst) {
+ list_del(&join->head);
+ kfree(join);
+ break;
+ }
+ }
+}
+
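+/* Joining an instance block seeds it with PDEs for the whole range
+ * managed by this VMM; nv50_vmm_pgd_pde() keeps the copies in sync
+ * afterwards.
+ */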
+int
+nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
+ struct nvkm_vmm_join *join;
+ int ret = 0;
+ u64 data;
+ u32 pdei;
+
+ if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
+ return -ENOMEM;
+ join->inst = inst;
+ list_add_tail(&join->head, &vmm->join);
+
+ nvkm_kmap(join->inst);
+ for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
+ if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
+ ret = -EINVAL;
+ break;
+ }
+ nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
+ }
+ nvkm_done(join->inst);
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv50_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
new file mode 100644
index 0000000000..5a08458fe1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
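+/* Turing moved TLB invalidation to a new register block: the page
+ * directory base (shifted right by 8) is written to 0xb830a0, with
+ * 0xb830a4 presumably its high half, the invalidate is kicked via bit 31
+ * of 0xb830b0 with the cache depth and scope in the low bits, and the
+ * same bit is polled for completion.
+ */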
+static void
+tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+
+ type |= 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
+
+ mutex_lock(&vmm->mmu->mutex);
+
+ nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ nvkm_wr32(device, 0xb830a4, 0x00000000);
+ nvkm_wr32(device, 0x100e68, 0x00000000);
+ nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
+ break;
+ );
+
+ mutex_unlock(&vmm->mmu->mutex);
+}
+
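+/* Supported page sizes; the 512MiB and larger entries are the upper
+ * page-directory levels, usable only for sparse mappings.  In the
+ * NVKM_VMM_PAGE_* flags the letters mean S = sparse, V = VRAM-backed,
+ * H = host-backed, C = compression supported, with 'x' for unsupported.
+ */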
+static const struct nvkm_vmm_func
+tu102_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = tu102_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}