author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/nouveau/nvkm/subdev/instmem
parent     Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/instmem')
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild  |   6
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c  | 246
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 604
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c  | 230
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c  | 263
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c  | 400
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h  |  33
7 files changed, 1782 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild new file mode 100644 index 000000000..06cbe19ce --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/instmem/base.o +nvkm-y += nvkm/subdev/instmem/nv04.o +nvkm-y += nvkm/subdev/instmem/nv40.o +nvkm-y += nvkm/subdev/instmem/nv50.o +nvkm-y += nvkm/subdev/instmem/gk20a.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c new file mode 100644 index 000000000..cd8163a52 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -0,0 +1,246 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "priv.h" + +#include <subdev/bar.h> + +/****************************************************************************** + * instmem object base implementation + *****************************************************************************/ +static void +nvkm_instobj_load(struct nvkm_instobj *iobj) +{ + struct nvkm_memory *memory = &iobj->memory; + const u64 size = nvkm_memory_size(memory); + void __iomem *map; + int i; + + if (!(map = nvkm_kmap(memory))) { + for (i = 0; i < size; i += 4) + nvkm_wo32(memory, i, iobj->suspend[i / 4]); + } else { + memcpy_toio(map, iobj->suspend, size); + } + nvkm_done(memory); + + kvfree(iobj->suspend); + iobj->suspend = NULL; +} + +static int +nvkm_instobj_save(struct nvkm_instobj *iobj) +{ + struct nvkm_memory *memory = &iobj->memory; + const u64 size = nvkm_memory_size(memory); + void __iomem *map; + int i; + + iobj->suspend = kvmalloc(size, GFP_KERNEL); + if (!iobj->suspend) + return -ENOMEM; + + if (!(map = nvkm_kmap(memory))) { + for (i = 0; i < size; i += 4) + iobj->suspend[i / 4] = nvkm_ro32(memory, i); + } else { + memcpy_fromio(iobj->suspend, map, size); + } + nvkm_done(memory); + return 0; +} + +void +nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj) +{ + spin_lock(&imem->lock); + list_del(&iobj->head); + spin_unlock(&imem->lock); +} + +void +nvkm_instobj_ctor(const struct nvkm_memory_func *func, + struct nvkm_instmem *imem, struct nvkm_instobj *iobj) +{ + nvkm_memory_ctor(func, &iobj->memory); + iobj->suspend = NULL; + spin_lock(&imem->lock); + list_add_tail(&iobj->head, &imem->list); + spin_unlock(&imem->lock); +} + +int +nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_memory *memory = NULL; + u32 offset; + int ret; + + ret = imem->func->memory_new(imem, size, align, zero, &memory); + if (ret) { + nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret); + goto done; + } + + nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align, + zero, nvkm_memory_addr(memory), nvkm_memory_size(memory)); + + if (!imem->func->zero && zero) { + void __iomem *map = nvkm_kmap(memory); + if (unlikely(!map)) { + for (offset = 0; offset < size; offset += 4) + nvkm_wo32(memory, offset, 0x00000000); + } else { + memset_io(map, 0x00, size); + } + nvkm_done(memory); + } + +done: + if (ret) + nvkm_memory_unref(&memory); + *pmemory = memory; + return ret; +} + +/****************************************************************************** + * instmem subdev base implementation + *****************************************************************************/ + +u32 +nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr) +{ + return imem->func->rd32(imem, addr); +} + +void +nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data) +{ + return imem->func->wr32(imem, addr, data); +} + +void +nvkm_instmem_boot(struct nvkm_instmem *imem) +{ + /* Separate bootstrapped objects from normal list, as we need + * to make sure they're accessed with the slowpath on suspend + * and resume. 
+ */ + struct nvkm_instobj *iobj, *itmp; + spin_lock(&imem->lock); + list_for_each_entry_safe(iobj, itmp, &imem->list, head) { + list_move_tail(&iobj->head, &imem->boot); + } + spin_unlock(&imem->lock); +} + +static int +nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_instmem *imem = nvkm_instmem(subdev); + struct nvkm_instobj *iobj; + + if (suspend) { + list_for_each_entry(iobj, &imem->list, head) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + + nvkm_bar_bar2_fini(subdev->device); + + list_for_each_entry(iobj, &imem->boot, head) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + } + + if (imem->func->fini) + imem->func->fini(imem); + + return 0; +} + +static int +nvkm_instmem_init(struct nvkm_subdev *subdev) +{ + struct nvkm_instmem *imem = nvkm_instmem(subdev); + struct nvkm_instobj *iobj; + + list_for_each_entry(iobj, &imem->boot, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + nvkm_bar_bar2_init(subdev->device); + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + return 0; +} + +static int +nvkm_instmem_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_instmem *imem = nvkm_instmem(subdev); + if (imem->func->oneinit) + return imem->func->oneinit(imem); + return 0; +} + +static void * +nvkm_instmem_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_instmem *imem = nvkm_instmem(subdev); + void *data = imem; + if (imem->func->dtor) + data = imem->func->dtor(imem); + mutex_destroy(&imem->mutex); + return data; +} + +static const struct nvkm_subdev_func +nvkm_instmem = { + .dtor = nvkm_instmem_dtor, + .oneinit = nvkm_instmem_oneinit, + .init = nvkm_instmem_init, + .fini = nvkm_instmem_fini, +}; + +void +nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem) +{ + nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev); + imem->func = func; + spin_lock_init(&imem->lock); + INIT_LIST_HEAD(&imem->list); + INIT_LIST_HEAD(&imem->boot); + mutex_init(&imem->mutex); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c new file mode 100644 index 000000000..648ecf5a8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -0,0 +1,604 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * GK20A does not have dedicated video memory, and to accurately represent this + * fact Nouveau will not create a RAM device for it. Therefore its instmem + * implementation must be done directly on top of system memory, while + * preserving coherency for read and write operations. + * + * Instmem can be allocated through two means: + * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory + * pages contiguous to the GPU. This is the preferred way. + * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically + * contiguous memory. + * + * In both cases CPU read and writes are performed by creating a write-combined + * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To + * be conservative we do this every time we acquire or release an instobj, but + * ideally L2 management should be handled at a higher level. + * + * To improve performance, CPU mappings are not removed upon instobj release. + * Instead they are placed into a LRU list to be recycled when the mapped space + * goes beyond a certain threshold. At the moment this limit is 1MB. + */ +#include "priv.h" + +#include <core/memory.h> +#include <core/tegra.h> +#include <subdev/ltc.h> +#include <subdev/mmu.h> + +struct gk20a_instobj { + struct nvkm_memory memory; + struct nvkm_mm_node *mn; + struct gk20a_instmem *imem; + + /* CPU mapping */ + u32 *vaddr; +}; +#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) + +/* + * Used for objects allocated using the DMA API + */ +struct gk20a_instobj_dma { + struct gk20a_instobj base; + + dma_addr_t handle; + struct nvkm_mm_node r; +}; +#define gk20a_instobj_dma(p) \ + container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base) + +/* + * Used for objects flattened using the IOMMU API + */ +struct gk20a_instobj_iommu { + struct gk20a_instobj base; + + /* to link into gk20a_instmem::vaddr_lru */ + struct list_head vaddr_node; + /* how many clients are using vaddr? */ + u32 use_cpt; + + /* will point to the higher half of pages */ + dma_addr_t *dma_addrs; + /* array of base.mem->size pages (+ dma_addr_ts) */ + struct page *pages[]; +}; +#define gk20a_instobj_iommu(p) \ + container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base) + +struct gk20a_instmem { + struct nvkm_instmem base; + + /* protects vaddr_* and gk20a_instobj::vaddr* */ + struct mutex lock; + + /* CPU mappings LRU */ + unsigned int vaddr_use; + unsigned int vaddr_max; + struct list_head vaddr_lru; + + /* Only used if IOMMU if present */ + struct mutex *mm_mutex; + struct nvkm_mm *mm; + struct iommu_domain *domain; + unsigned long iommu_pgshift; + u16 iommu_bit; + + /* Only used by DMA API */ + unsigned long attrs; +}; +#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) + +static enum nvkm_memory_target +gk20a_instobj_target(struct nvkm_memory *memory) +{ + return NVKM_MEM_TARGET_NCOH; +} + +static u8 +gk20a_instobj_page(struct nvkm_memory *memory) +{ + return 12; +} + +static u64 +gk20a_instobj_addr(struct nvkm_memory *memory) +{ + return (u64)gk20a_instobj(memory)->mn->offset << 12; +} + +static u64 +gk20a_instobj_size(struct nvkm_memory *memory) +{ + return (u64)gk20a_instobj(memory)->mn->length << 12; +} + +/* + * Recycle the vaddr of obj. 
Must be called with gk20a_instmem::lock held. + */ +static void +gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj) +{ + struct gk20a_instmem *imem = obj->base.imem; + /* there should not be any user left... */ + WARN_ON(obj->use_cpt); + list_del(&obj->vaddr_node); + vunmap(obj->base.vaddr); + obj->base.vaddr = NULL; + imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); + nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use, + imem->vaddr_max); +} + +/* + * Must be called while holding gk20a_instmem::lock + */ +static void +gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size) +{ + while (imem->vaddr_use + size > imem->vaddr_max) { + /* no candidate that can be unmapped, abort... */ + if (list_empty(&imem->vaddr_lru)) + break; + + gk20a_instobj_iommu_recycle_vaddr( + list_first_entry(&imem->vaddr_lru, + struct gk20a_instobj_iommu, vaddr_node)); + } +} + +static void __iomem * +gk20a_instobj_acquire_dma(struct nvkm_memory *memory) +{ + struct gk20a_instobj *node = gk20a_instobj(memory); + struct gk20a_instmem *imem = node->imem; + struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; + + nvkm_ltc_flush(ltc); + + return node->vaddr; +} + +static void __iomem * +gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) +{ + struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); + struct gk20a_instmem *imem = node->base.imem; + struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; + const u64 size = nvkm_memory_size(memory); + + nvkm_ltc_flush(ltc); + + mutex_lock(&imem->lock); + + if (node->base.vaddr) { + if (!node->use_cpt) { + /* remove from LRU list since mapping in use again */ + list_del(&node->vaddr_node); + } + goto out; + } + + /* try to free some address space if we reached the limit */ + gk20a_instmem_vaddr_gc(imem, size); + + /* map the pages */ + node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!node->base.vaddr) { + nvkm_error(&imem->base.subdev, "cannot map instobj - " + "this is not going to end well...\n"); + goto out; + } + + imem->vaddr_use += size; + nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", + imem->vaddr_use, imem->vaddr_max); + +out: + node->use_cpt++; + mutex_unlock(&imem->lock); + + return node->base.vaddr; +} + +static void +gk20a_instobj_release_dma(struct nvkm_memory *memory) +{ + struct gk20a_instobj *node = gk20a_instobj(memory); + struct gk20a_instmem *imem = node->imem; + struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; + + /* in case we got a write-combined mapping */ + wmb(); + nvkm_ltc_invalidate(ltc); +} + +static void +gk20a_instobj_release_iommu(struct nvkm_memory *memory) +{ + struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); + struct gk20a_instmem *imem = node->base.imem; + struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; + + mutex_lock(&imem->lock); + + /* we should at least have one user to release... 
*/ + if (WARN_ON(node->use_cpt == 0)) + goto out; + + /* add unused objs to the LRU list to recycle their mapping */ + if (--node->use_cpt == 0) + list_add_tail(&node->vaddr_node, &imem->vaddr_lru); + +out: + mutex_unlock(&imem->lock); + + wmb(); + nvkm_ltc_invalidate(ltc); +} + +static u32 +gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset) +{ + struct gk20a_instobj *node = gk20a_instobj(memory); + + return node->vaddr[offset / 4]; +} + +static void +gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) +{ + struct gk20a_instobj *node = gk20a_instobj(memory); + + node->vaddr[offset / 4] = data; +} + +static int +gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, + struct nvkm_vma *vma, void *argv, u32 argc) +{ + struct gk20a_instobj *node = gk20a_instobj(memory); + struct nvkm_vmm_map map = { + .memory = &node->memory, + .offset = offset, + .mem = node->mn, + }; + + return nvkm_vmm_map(vmm, vma, argv, argc, &map); +} + +static void * +gk20a_instobj_dtor_dma(struct nvkm_memory *memory) +{ + struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); + struct gk20a_instmem *imem = node->base.imem; + struct device *dev = imem->base.subdev.device->dev; + + if (unlikely(!node->base.vaddr)) + goto out; + + dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT, + node->base.vaddr, node->handle, imem->attrs); + +out: + return node; +} + +static void * +gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) +{ + struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); + struct gk20a_instmem *imem = node->base.imem; + struct device *dev = imem->base.subdev.device->dev; + struct nvkm_mm_node *r = node->base.mn; + int i; + + if (unlikely(!r)) + goto out; + + mutex_lock(&imem->lock); + + /* vaddr has already been recycled */ + if (node->base.vaddr) + gk20a_instobj_iommu_recycle_vaddr(node); + + mutex_unlock(&imem->lock); + + /* clear IOMMU bit to unmap pages */ + r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); + + /* Unmap pages from GPU address space and free them */ + for (i = 0; i < node->base.mn->length; i++) { + iommu_unmap(imem->domain, + (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE); + dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(node->pages[i]); + } + + /* Release area from GPU address space */ + mutex_lock(imem->mm_mutex); + nvkm_mm_free(imem->mm, &r); + mutex_unlock(imem->mm_mutex); + +out: + return node; +} + +static const struct nvkm_memory_func +gk20a_instobj_func_dma = { + .dtor = gk20a_instobj_dtor_dma, + .target = gk20a_instobj_target, + .page = gk20a_instobj_page, + .addr = gk20a_instobj_addr, + .size = gk20a_instobj_size, + .acquire = gk20a_instobj_acquire_dma, + .release = gk20a_instobj_release_dma, + .map = gk20a_instobj_map, +}; + +static const struct nvkm_memory_func +gk20a_instobj_func_iommu = { + .dtor = gk20a_instobj_dtor_iommu, + .target = gk20a_instobj_target, + .page = gk20a_instobj_page, + .addr = gk20a_instobj_addr, + .size = gk20a_instobj_size, + .acquire = gk20a_instobj_acquire_iommu, + .release = gk20a_instobj_release_iommu, + .map = gk20a_instobj_map, +}; + +static const struct nvkm_memory_ptrs +gk20a_instobj_ptrs = { + .rd32 = gk20a_instobj_rd32, + .wr32 = gk20a_instobj_wr32, +}; + +static int +gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, + struct gk20a_instobj **_node) +{ + struct gk20a_instobj_dma *node; + struct nvkm_subdev *subdev = &imem->base.subdev; + struct device *dev = subdev->device->dev; + + if (!(node = 
kzalloc(sizeof(*node), GFP_KERNEL))) + return -ENOMEM; + *_node = &node->base; + + nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); + node->base.memory.ptrs = &gk20a_instobj_ptrs; + + node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, + &node->handle, GFP_KERNEL, + imem->attrs); + if (!node->base.vaddr) { + nvkm_error(subdev, "cannot allocate DMA memory\n"); + return -ENOMEM; + } + + /* alignment check */ + if (unlikely(node->handle & (align - 1))) + nvkm_warn(subdev, + "memory not aligned as requested: %pad (0x%x)\n", + &node->handle, align); + + /* present memory for being mapped using small pages */ + node->r.type = 12; + node->r.offset = node->handle >> 12; + node->r.length = (npages << PAGE_SHIFT) >> 12; + + node->base.mn = &node->r; + return 0; +} + +static int +gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, + struct gk20a_instobj **_node) +{ + struct gk20a_instobj_iommu *node; + struct nvkm_subdev *subdev = &imem->base.subdev; + struct device *dev = subdev->device->dev; + struct nvkm_mm_node *r; + int ret; + int i; + + /* + * despite their variable size, instmem allocations are small enough + * (< 1 page) to be handled by kzalloc + */ + if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) + + sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) + return -ENOMEM; + *_node = &node->base; + node->dma_addrs = (void *)(node->pages + npages); + + nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); + node->base.memory.ptrs = &gk20a_instobj_ptrs; + + /* Allocate backing memory */ + for (i = 0; i < npages; i++) { + struct page *p = alloc_page(GFP_KERNEL); + dma_addr_t dma_adr; + + if (p == NULL) { + ret = -ENOMEM; + goto free_pages; + } + node->pages[i] = p; + dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_adr)) { + nvkm_error(subdev, "DMA mapping error!\n"); + ret = -ENOMEM; + goto free_pages; + } + node->dma_addrs[i] = dma_adr; + } + + mutex_lock(imem->mm_mutex); + /* Reserve area from GPU address space */ + ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages, + align >> imem->iommu_pgshift, &r); + mutex_unlock(imem->mm_mutex); + if (ret) { + nvkm_error(subdev, "IOMMU space is full!\n"); + goto free_pages; + } + + /* Map into GPU address space */ + for (i = 0; i < npages; i++) { + u32 offset = (r->offset + i) << imem->iommu_pgshift; + + ret = iommu_map(imem->domain, offset, node->dma_addrs[i], + PAGE_SIZE, IOMMU_READ | IOMMU_WRITE); + if (ret < 0) { + nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret); + + while (i-- > 0) { + offset -= PAGE_SIZE; + iommu_unmap(imem->domain, offset, PAGE_SIZE); + } + goto release_area; + } + } + + /* IOMMU bit tells that an address is to be resolved through the IOMMU */ + r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift); + + node->base.mn = r; + return 0; + +release_area: + mutex_lock(imem->mm_mutex); + nvkm_mm_free(imem->mm, &r); + mutex_unlock(imem->mm_mutex); + +free_pages: + for (i = 0; i < npages && node->pages[i] != NULL; i++) { + dma_addr_t dma_addr = node->dma_addrs[i]; + if (dma_addr) + dma_unmap_page(dev, dma_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(node->pages[i]); + } + + return ret; +} + +static int +gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct gk20a_instmem *imem = gk20a_instmem(base); + struct nvkm_subdev *subdev = &imem->base.subdev; + struct gk20a_instobj *node = NULL; + int ret; + + nvkm_debug(subdev, "%s (%s): size: %x 
align: %x\n", __func__, + imem->domain ? "IOMMU" : "DMA", size, align); + + /* Round size and align to page bounds */ + size = max(roundup(size, PAGE_SIZE), PAGE_SIZE); + align = max(roundup(align, PAGE_SIZE), PAGE_SIZE); + + if (imem->domain) + ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT, + align, &node); + else + ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT, + align, &node); + *pmemory = node ? &node->memory : NULL; + if (ret) + return ret; + + node->imem = imem; + + nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n", + size, align, (u64)node->mn->offset << 12); + + return 0; +} + +static void * +gk20a_instmem_dtor(struct nvkm_instmem *base) +{ + struct gk20a_instmem *imem = gk20a_instmem(base); + + /* perform some sanity checks... */ + if (!list_empty(&imem->vaddr_lru)) + nvkm_warn(&base->subdev, "instobj LRU not empty!\n"); + + if (imem->vaddr_use != 0) + nvkm_warn(&base->subdev, "instobj vmap area not empty! " + "0x%x bytes still mapped\n", imem->vaddr_use); + + return imem; +} + +static const struct nvkm_instmem_func +gk20a_instmem = { + .dtor = gk20a_instmem_dtor, + .memory_new = gk20a_instobj_new, + .zero = false, +}; + +int +gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct gk20a_instmem *imem; + + if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) + return -ENOMEM; + nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base); + mutex_init(&imem->lock); + *pimem = &imem->base; + + /* do not allow more than 1MB of CPU-mapped instmem */ + imem->vaddr_use = 0; + imem->vaddr_max = 0x100000; + INIT_LIST_HEAD(&imem->vaddr_lru); + + if (tdev->iommu.domain) { + imem->mm_mutex = &tdev->iommu.mutex; + imem->mm = &tdev->iommu.mm; + imem->domain = tdev->iommu.domain; + imem->iommu_pgshift = tdev->iommu.pgshift; + imem->iommu_bit = tdev->func->iommu_bit; + + nvkm_info(&imem->base.subdev, "using IOMMU\n"); + } else { + imem->attrs = DMA_ATTR_WEAK_ORDERING | + DMA_ATTR_WRITE_COMBINE; + + nvkm_info(&imem->base.subdev, "using DMA API\n"); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c new file mode 100644 index 000000000..25603b01d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -0,0 +1,230 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define nv04_instmem(p) container_of((p), struct nv04_instmem, base) +#include "priv.h" + +#include <core/ramht.h> + +struct nv04_instmem { + struct nvkm_instmem base; + struct nvkm_mm heap; +}; + +/****************************************************************************** + * instmem object implementation + *****************************************************************************/ +#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory) + +struct nv04_instobj { + struct nvkm_instobj base; + struct nv04_instmem *imem; + struct nvkm_mm_node *node; +}; + +static void +nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) +{ + struct nv04_instobj *iobj = nv04_instobj(memory); + struct nvkm_device *device = iobj->imem->base.subdev.device; + nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data); +} + +static u32 +nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset) +{ + struct nv04_instobj *iobj = nv04_instobj(memory); + struct nvkm_device *device = iobj->imem->base.subdev.device; + return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset); +} + +static const struct nvkm_memory_ptrs +nv04_instobj_ptrs = { + .rd32 = nv04_instobj_rd32, + .wr32 = nv04_instobj_wr32, +}; + +static void +nv04_instobj_release(struct nvkm_memory *memory) +{ +} + +static void __iomem * +nv04_instobj_acquire(struct nvkm_memory *memory) +{ + struct nv04_instobj *iobj = nv04_instobj(memory); + struct nvkm_device *device = iobj->imem->base.subdev.device; + return device->pri + 0x700000 + iobj->node->offset; +} + +static u64 +nv04_instobj_size(struct nvkm_memory *memory) +{ + return nv04_instobj(memory)->node->length; +} + +static u64 +nv04_instobj_addr(struct nvkm_memory *memory) +{ + return nv04_instobj(memory)->node->offset; +} + +static enum nvkm_memory_target +nv04_instobj_target(struct nvkm_memory *memory) +{ + return NVKM_MEM_TARGET_INST; +} + +static void * +nv04_instobj_dtor(struct nvkm_memory *memory) +{ + struct nv04_instobj *iobj = nv04_instobj(memory); + mutex_lock(&iobj->imem->base.mutex); + nvkm_mm_free(&iobj->imem->heap, &iobj->node); + mutex_unlock(&iobj->imem->base.mutex); + nvkm_instobj_dtor(&iobj->imem->base, &iobj->base); + return iobj; +} + +static const struct nvkm_memory_func +nv04_instobj_func = { + .dtor = nv04_instobj_dtor, + .target = nv04_instobj_target, + .size = nv04_instobj_size, + .addr = nv04_instobj_addr, + .acquire = nv04_instobj_acquire, + .release = nv04_instobj_release, +}; + +static int +nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nv04_instmem *imem = nv04_instmem(base); + struct nv04_instobj *iobj; + int ret; + + if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) + return -ENOMEM; + *pmemory = &iobj->base.memory; + + nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base); + iobj->base.memory.ptrs = &nv04_instobj_ptrs; + iobj->imem = imem; + + mutex_lock(&imem->base.mutex); + ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? 
align : 1, &iobj->node); + mutex_unlock(&imem->base.mutex); + return ret; +} + +/****************************************************************************** + * instmem subdev implementation + *****************************************************************************/ + +static u32 +nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr) +{ + return nvkm_rd32(imem->subdev.device, 0x700000 + addr); +} + +static void +nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data) +{ + nvkm_wr32(imem->subdev.device, 0x700000 + addr, data); +} + +static int +nv04_instmem_oneinit(struct nvkm_instmem *base) +{ + struct nv04_instmem *imem = nv04_instmem(base); + struct nvkm_device *device = imem->base.subdev.device; + int ret; + + /* PRAMIN aperture maps over the end of VRAM, reserve it */ + imem->base.reserved = 512 * 1024; + + ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); + if (ret) + return ret; + + /* 0x00000-0x10000: reserve for probable vbios image */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false, + &imem->base.vbios); + if (ret) + return ret; + + /* 0x10000-0x18000: reserve for RAMHT */ + ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht); + if (ret) + return ret; + + /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true, + &imem->base.ramfc); + if (ret) + return ret; + + /* 0x18800-0x18a00: reserve for RAMRO */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false, + &imem->base.ramro); + if (ret) + return ret; + + return 0; +} + +static void * +nv04_instmem_dtor(struct nvkm_instmem *base) +{ + struct nv04_instmem *imem = nv04_instmem(base); + nvkm_memory_unref(&imem->base.ramfc); + nvkm_memory_unref(&imem->base.ramro); + nvkm_ramht_del(&imem->base.ramht); + nvkm_memory_unref(&imem->base.vbios); + nvkm_mm_fini(&imem->heap); + return imem; +} + +static const struct nvkm_instmem_func +nv04_instmem = { + .dtor = nv04_instmem_dtor, + .oneinit = nv04_instmem_oneinit, + .rd32 = nv04_instmem_rd32, + .wr32 = nv04_instmem_wr32, + .memory_new = nv04_instobj_new, + .zero = false, +}; + +int +nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + struct nv04_instmem *imem; + + if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) + return -ENOMEM; + nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base); + *pimem = &imem->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c new file mode 100644 index 000000000..6b462f960 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c @@ -0,0 +1,263 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define nv40_instmem(p) container_of((p), struct nv40_instmem, base) +#include "priv.h" + +#include <core/ramht.h> +#include <engine/gr/nv40.h> + +struct nv40_instmem { + struct nvkm_instmem base; + struct nvkm_mm heap; + void __iomem *iomem; +}; + +/****************************************************************************** + * instmem object implementation + *****************************************************************************/ +#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory) + +struct nv40_instobj { + struct nvkm_instobj base; + struct nv40_instmem *imem; + struct nvkm_mm_node *node; +}; + +static void +nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) +{ + struct nv40_instobj *iobj = nv40_instobj(memory); + iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset); +} + +static u32 +nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset) +{ + struct nv40_instobj *iobj = nv40_instobj(memory); + return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset); +} + +static const struct nvkm_memory_ptrs +nv40_instobj_ptrs = { + .rd32 = nv40_instobj_rd32, + .wr32 = nv40_instobj_wr32, +}; + +static void +nv40_instobj_release(struct nvkm_memory *memory) +{ + wmb(); +} + +static void __iomem * +nv40_instobj_acquire(struct nvkm_memory *memory) +{ + struct nv40_instobj *iobj = nv40_instobj(memory); + return iobj->imem->iomem + iobj->node->offset; +} + +static u64 +nv40_instobj_size(struct nvkm_memory *memory) +{ + return nv40_instobj(memory)->node->length; +} + +static u64 +nv40_instobj_addr(struct nvkm_memory *memory) +{ + return nv40_instobj(memory)->node->offset; +} + +static enum nvkm_memory_target +nv40_instobj_target(struct nvkm_memory *memory) +{ + return NVKM_MEM_TARGET_INST; +} + +static void * +nv40_instobj_dtor(struct nvkm_memory *memory) +{ + struct nv40_instobj *iobj = nv40_instobj(memory); + mutex_lock(&iobj->imem->base.mutex); + nvkm_mm_free(&iobj->imem->heap, &iobj->node); + mutex_unlock(&iobj->imem->base.mutex); + nvkm_instobj_dtor(&iobj->imem->base, &iobj->base); + return iobj; +} + +static const struct nvkm_memory_func +nv40_instobj_func = { + .dtor = nv40_instobj_dtor, + .target = nv40_instobj_target, + .size = nv40_instobj_size, + .addr = nv40_instobj_addr, + .acquire = nv40_instobj_acquire, + .release = nv40_instobj_release, +}; + +static int +nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nv40_instmem *imem = nv40_instmem(base); + struct nv40_instobj *iobj; + int ret; + + if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) + return -ENOMEM; + *pmemory = &iobj->base.memory; + + nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base); + iobj->base.memory.ptrs = &nv40_instobj_ptrs; + iobj->imem = imem; + + mutex_lock(&imem->base.mutex); + ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? 
align : 1, &iobj->node); + mutex_unlock(&imem->base.mutex); + return ret; +} + +/****************************************************************************** + * instmem subdev implementation + *****************************************************************************/ + +static u32 +nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr) +{ + return ioread32_native(nv40_instmem(base)->iomem + addr); +} + +static void +nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data) +{ + iowrite32_native(data, nv40_instmem(base)->iomem + addr); +} + +static int +nv40_instmem_oneinit(struct nvkm_instmem *base) +{ + struct nv40_instmem *imem = nv40_instmem(base); + struct nvkm_device *device = imem->base.subdev.device; + int ret, vs; + + /* PRAMIN aperture maps over the end of vram, reserve enough space + * to fit graphics contexts for every channel, the magics come + * from engine/gr/nv40.c + */ + vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8); + if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs; + else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs; + else if (nv44_gr_class(device)) imem->base.reserved = 0x4980 * vs; + else imem->base.reserved = 0x4a40 * vs; + imem->base.reserved += 16 * 1024; + imem->base.reserved *= 32; /* per-channel */ + imem->base.reserved += 512 * 1024; /* pci(e)gart table */ + imem->base.reserved += 512 * 1024; /* object storage */ + imem->base.reserved = round_up(imem->base.reserved, 4096); + + ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); + if (ret) + return ret; + + /* 0x00000-0x10000: reserve for probable vbios image */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false, + &imem->base.vbios); + if (ret) + return ret; + + /* 0x10000-0x18000: reserve for RAMHT */ + ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht); + if (ret) + return ret; + + /* 0x18000-0x18200: reserve for RAMRO + * 0x18200-0x20000: padding + */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false, + &imem->base.ramro); + if (ret) + return ret; + + /* 0x20000-0x21000: reserve for RAMFC + * 0x21000-0x40000: padding and some unknown crap + */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true, + &imem->base.ramfc); + if (ret) + return ret; + + return 0; +} + +static void * +nv40_instmem_dtor(struct nvkm_instmem *base) +{ + struct nv40_instmem *imem = nv40_instmem(base); + nvkm_memory_unref(&imem->base.ramfc); + nvkm_memory_unref(&imem->base.ramro); + nvkm_ramht_del(&imem->base.ramht); + nvkm_memory_unref(&imem->base.vbios); + nvkm_mm_fini(&imem->heap); + if (imem->iomem) + iounmap(imem->iomem); + return imem; +} + +static const struct nvkm_instmem_func +nv40_instmem = { + .dtor = nv40_instmem_dtor, + .oneinit = nv40_instmem_oneinit, + .rd32 = nv40_instmem_rd32, + .wr32 = nv40_instmem_wr32, + .memory_new = nv40_instobj_new, + .zero = false, +}; + +int +nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + struct nv40_instmem *imem; + int bar; + + if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) + return -ENOMEM; + nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base); + *pimem = &imem->base; + + /* map bar */ + if (device->func->resource_size(device, 2)) + bar = 2; + else + bar = 3; + + imem->iomem = ioremap_wc(device->func->resource_addr(device, bar), + device->func->resource_size(device, bar)); + if (!imem->iomem) { + nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n"); + 
return -EFAULT; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c new file mode 100644 index 000000000..c51bac761 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -0,0 +1,400 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define nv50_instmem(p) container_of((p), struct nv50_instmem, base) +#include "priv.h" + +#include <core/memory.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/mmu.h> + +struct nv50_instmem { + struct nvkm_instmem base; + u64 addr; + + /* Mappings that can be evicted when BAR2 space has been exhausted. 
*/ + struct list_head lru; +}; + +/****************************************************************************** + * instmem object implementation + *****************************************************************************/ +#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory) + +struct nv50_instobj { + struct nvkm_instobj base; + struct nv50_instmem *imem; + struct nvkm_memory *ram; + struct nvkm_vma *bar; + refcount_t maps; + void *map; + struct list_head lru; +}; + +static void +nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nv50_instmem *imem = iobj->imem; + struct nvkm_device *device = imem->base.subdev.device; + u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL; + u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL; + unsigned long flags; + + spin_lock_irqsave(&imem->base.lock, flags); + if (unlikely(imem->addr != base)) { + nvkm_wr32(device, 0x001700, base >> 16); + imem->addr = base; + } + nvkm_wr32(device, 0x700000 + addr, data); + spin_unlock_irqrestore(&imem->base.lock, flags); +} + +static u32 +nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nv50_instmem *imem = iobj->imem; + struct nvkm_device *device = imem->base.subdev.device; + u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL; + u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL; + u32 data; + unsigned long flags; + + spin_lock_irqsave(&imem->base.lock, flags); + if (unlikely(imem->addr != base)) { + nvkm_wr32(device, 0x001700, base >> 16); + imem->addr = base; + } + data = nvkm_rd32(device, 0x700000 + addr); + spin_unlock_irqrestore(&imem->base.lock, flags); + return data; +} + +static const struct nvkm_memory_ptrs +nv50_instobj_slow = { + .rd32 = nv50_instobj_rd32_slow, + .wr32 = nv50_instobj_wr32_slow, +}; + +static void +nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) +{ + iowrite32_native(data, nv50_instobj(memory)->map + offset); +} + +static u32 +nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset) +{ + return ioread32_native(nv50_instobj(memory)->map + offset); +} + +static const struct nvkm_memory_ptrs +nv50_instobj_fast = { + .rd32 = nv50_instobj_rd32, + .wr32 = nv50_instobj_wr32, +}; + +static void +nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) +{ + struct nv50_instmem *imem = iobj->imem; + struct nv50_instobj *eobj; + struct nvkm_memory *memory = &iobj->base.memory; + struct nvkm_subdev *subdev = &imem->base.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_vma *bar = NULL, *ebar; + u64 size = nvkm_memory_size(memory); + void *emap; + int ret; + + /* Attempt to allocate BAR2 address-space and map the object + * into it. The lock has to be dropped while doing this due + * to the possibility of recursion for page table allocation. + */ + mutex_unlock(&imem->base.mutex); + while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) { + /* Evict unused mappings, and keep retrying until we either + * succeed,or there's no more objects left on the LRU. 
+ */ + mutex_lock(&imem->base.mutex); + eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru); + if (eobj) { + nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n", + nvkm_memory_addr(&eobj->base.memory), + nvkm_memory_size(&eobj->base.memory), + eobj->bar->addr); + list_del_init(&eobj->lru); + ebar = eobj->bar; + eobj->bar = NULL; + emap = eobj->map; + eobj->map = NULL; + } + mutex_unlock(&imem->base.mutex); + if (!eobj) + break; + iounmap(emap); + nvkm_vmm_put(vmm, &ebar); + } + + if (ret == 0) + ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0); + mutex_lock(&imem->base.mutex); + if (ret || iobj->bar) { + /* We either failed, or another thread beat us. */ + mutex_unlock(&imem->base.mutex); + nvkm_vmm_put(vmm, &bar); + mutex_lock(&imem->base.mutex); + return; + } + + /* Make the mapping visible to the host. */ + iobj->bar = bar; + iobj->map = ioremap_wc(device->func->resource_addr(device, 3) + + (u32)iobj->bar->addr, size); + if (!iobj->map) { + nvkm_warn(subdev, "PRAMIN ioremap failed\n"); + nvkm_vmm_put(vmm, &iobj->bar); + } +} + +static int +nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, + struct nvkm_vma *vma, void *argv, u32 argc) +{ + memory = nv50_instobj(memory)->ram; + return nvkm_memory_map(memory, offset, vmm, vma, argv, argc); +} + +static void +nv50_instobj_release(struct nvkm_memory *memory) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nv50_instmem *imem = iobj->imem; + struct nvkm_subdev *subdev = &imem->base.subdev; + + wmb(); + nvkm_bar_flush(subdev->device->bar); + + if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) { + /* Add the now-unused mapping to the LRU instead of directly + * unmapping it here, in case we need to map it again later. + */ + if (likely(iobj->lru.next) && iobj->map) { + BUG_ON(!list_empty(&iobj->lru)); + list_add_tail(&iobj->lru, &imem->lru); + } + + /* Switch back to NULL accessors when last map is gone. */ + iobj->base.memory.ptrs = NULL; + mutex_unlock(&imem->base.mutex); + } +} + +static void __iomem * +nv50_instobj_acquire(struct nvkm_memory *memory) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nvkm_instmem *imem = &iobj->imem->base; + struct nvkm_vmm *vmm; + void __iomem *map = NULL; + + /* Already mapped? */ + if (refcount_inc_not_zero(&iobj->maps)) + return iobj->map; + + /* Take the lock, and re-check that another thread hasn't + * already mapped the object in the meantime. + */ + mutex_lock(&imem->mutex); + if (refcount_inc_not_zero(&iobj->maps)) { + mutex_unlock(&imem->mutex); + return iobj->map; + } + + /* Attempt to get a direct CPU mapping of the object. */ + if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) { + if (!iobj->map) + nv50_instobj_kmap(iobj, vmm); + map = iobj->map; + } + + if (!refcount_inc_not_zero(&iobj->maps)) { + /* Exclude object from eviction while it's being accessed. */ + if (likely(iobj->lru.next)) + list_del_init(&iobj->lru); + + if (map) + iobj->base.memory.ptrs = &nv50_instobj_fast; + else + iobj->base.memory.ptrs = &nv50_instobj_slow; + refcount_set(&iobj->maps, 1); + } + + mutex_unlock(&imem->mutex); + return map; +} + +static void +nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nvkm_instmem *imem = &iobj->imem->base; + + /* Exclude bootstrapped objects (ie. the page tables for the + * instmem BAR itself) from eviction. 
+ */ + mutex_lock(&imem->mutex); + if (likely(iobj->lru.next)) { + list_del_init(&iobj->lru); + iobj->lru.next = NULL; + } + + nv50_instobj_kmap(iobj, vmm); + nvkm_instmem_boot(imem); + mutex_unlock(&imem->mutex); +} + +static u64 +nv50_instobj_size(struct nvkm_memory *memory) +{ + return nvkm_memory_size(nv50_instobj(memory)->ram); +} + +static u64 +nv50_instobj_addr(struct nvkm_memory *memory) +{ + return nvkm_memory_addr(nv50_instobj(memory)->ram); +} + +static u64 +nv50_instobj_bar2(struct nvkm_memory *memory) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + u64 addr = ~0ULL; + if (nv50_instobj_acquire(&iobj->base.memory)) { + iobj->lru.next = NULL; /* Exclude from eviction. */ + addr = iobj->bar->addr; + } + nv50_instobj_release(&iobj->base.memory); + return addr; +} + +static enum nvkm_memory_target +nv50_instobj_target(struct nvkm_memory *memory) +{ + return nvkm_memory_target(nv50_instobj(memory)->ram); +} + +static void * +nv50_instobj_dtor(struct nvkm_memory *memory) +{ + struct nv50_instobj *iobj = nv50_instobj(memory); + struct nvkm_instmem *imem = &iobj->imem->base; + struct nvkm_vma *bar; + void *map; + + mutex_lock(&imem->mutex); + if (likely(iobj->lru.next)) + list_del(&iobj->lru); + map = iobj->map; + bar = iobj->bar; + mutex_unlock(&imem->mutex); + + if (map) { + struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device); + iounmap(map); + if (likely(vmm)) /* Can be NULL during BAR destructor. */ + nvkm_vmm_put(vmm, &bar); + } + + nvkm_memory_unref(&iobj->ram); + nvkm_instobj_dtor(imem, &iobj->base); + return iobj; +} + +static const struct nvkm_memory_func +nv50_instobj_func = { + .dtor = nv50_instobj_dtor, + .target = nv50_instobj_target, + .bar2 = nv50_instobj_bar2, + .addr = nv50_instobj_addr, + .size = nv50_instobj_size, + .boot = nv50_instobj_boot, + .acquire = nv50_instobj_acquire, + .release = nv50_instobj_release, + .map = nv50_instobj_map, +}; + +static int +nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nv50_instmem *imem = nv50_instmem(base); + struct nv50_instobj *iobj; + struct nvkm_device *device = imem->base.subdev.device; + u8 page = max(order_base_2(align), 12); + + if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) + return -ENOMEM; + *pmemory = &iobj->base.memory; + + nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base); + iobj->imem = imem; + refcount_set(&iobj->maps, 0); + INIT_LIST_HEAD(&iobj->lru); + + return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram); +} + +/****************************************************************************** + * instmem subdev implementation + *****************************************************************************/ + +static void +nv50_instmem_fini(struct nvkm_instmem *base) +{ + nv50_instmem(base)->addr = ~0ULL; +} + +static const struct nvkm_instmem_func +nv50_instmem = { + .fini = nv50_instmem_fini, + .memory_new = nv50_instobj_new, + .zero = false, +}; + +int +nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + struct nv50_instmem *imem; + + if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) + return -ENOMEM; + nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base); + INIT_LIST_HEAD(&imem->lru); + *pimem = &imem->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h new file mode 100644 index 000000000..56c15e30a --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_INSTMEM_PRIV_H__ +#define __NVKM_INSTMEM_PRIV_H__ +#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev) +#include <subdev/instmem.h> + +struct nvkm_instmem_func { + void *(*dtor)(struct nvkm_instmem *); + int (*oneinit)(struct nvkm_instmem *); + void (*fini)(struct nvkm_instmem *); + u32 (*rd32)(struct nvkm_instmem *, u32 addr); + void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data); + int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align, + bool zero, struct nvkm_memory **); + bool zero; +}; + +void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_instmem *); +void nvkm_instmem_boot(struct nvkm_instmem *); + +#include <core/memory.h> + +struct nvkm_instobj { + struct nvkm_memory memory; + struct list_head head; + u32 *suspend; +}; + +void nvkm_instobj_ctor(const struct nvkm_memory_func *func, + struct nvkm_instmem *, struct nvkm_instobj *); +void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *); +#endif |
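
The suspend path in base.c above (nvkm_instobj_save()/nvkm_instobj_load()) snapshots every live instance object into a kvmalloc'd buffer before the device loses power and replays it on resume, preferring a linear memcpy_fromio()/memcpy_toio() when a CPU mapping is available and falling back to 32-bit accessors otherwise. The sketch below is a minimal user-space illustration of that pattern only, not driver code: struct fake_mem, fm_rd32()/fm_wr32() and the plain malloc() are stand-ins invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for nvkm_memory: a word-addressable region that
 * may or may not expose a direct CPU mapping. */
struct fake_mem {
        uint32_t *words;
        size_t    size;     /* bytes, multiple of 4 */
        int       mappable; /* 0: only rd32/wr32-style access works */
        uint32_t *suspend;  /* snapshot taken before "suspend" */
};

static uint32_t fm_rd32(struct fake_mem *m, size_t off) { return m->words[off / 4]; }
static void fm_wr32(struct fake_mem *m, size_t off, uint32_t v) { m->words[off / 4] = v; }

/* Mirror of nvkm_instobj_save(): snapshot contents into a heap buffer. */
static int fm_save(struct fake_mem *m)
{
        m->suspend = malloc(m->size);
        if (!m->suspend)
                return -1;
        if (m->mappable) {
                memcpy(m->suspend, m->words, m->size);        /* fast path */
        } else {
                for (size_t off = 0; off < m->size; off += 4) /* slow path */
                        m->suspend[off / 4] = fm_rd32(m, off);
        }
        return 0;
}

/* Mirror of nvkm_instobj_load(): replay the snapshot and drop it. */
static void fm_load(struct fake_mem *m)
{
        if (m->mappable) {
                memcpy(m->words, m->suspend, m->size);
        } else {
                for (size_t off = 0; off < m->size; off += 4)
                        fm_wr32(m, off, m->suspend[off / 4]);
        }
        free(m->suspend);
        m->suspend = NULL;
}

int main(void)
{
        uint32_t backing[4] = { 1, 2, 3, 4 };
        struct fake_mem m = { backing, sizeof(backing), 0, NULL };

        if (fm_save(&m))
                return 1;
        memset(backing, 0, sizeof(backing));  /* simulate power loss */
        fm_load(&m);
        printf("restored word 2 = %u\n", (unsigned)backing[2]);  /* prints 3 */
        return 0;
}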
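
As the header comment in gk20a.c above explains, CPU mappings are not torn down when an object is released; they sit on an LRU list and are only recycled once the total mapped size would exceed a 1 MiB budget (gk20a_instmem_vaddr_gc()). A hedged sketch of that budget/recycle logic in isolation follows; struct map_cache and struct cached_map are invented for the example, and the budget is charged when a mapping enters the cache, which simplifies the driver's accounting.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cached CPU mapping: in the driver this would carry the
 * vmap()'d address; here only the size matters for the budget logic. */
struct cached_map {
        size_t size;
        struct cached_map *next;
};

struct map_cache {
        size_t use, max;            /* like vaddr_use / vaddr_max */
        struct cached_map *lru;     /* head = least recently released */
        struct cached_map **tail;
};

static void cache_init(struct map_cache *c, size_t max)
{
        c->use = 0;
        c->max = max;
        c->lru = NULL;
        c->tail = &c->lru;
}

/* Counterpart of gk20a_instobj_iommu_recycle_vaddr(): drop the oldest entry. */
static void cache_recycle_one(struct map_cache *c)
{
        struct cached_map *victim = c->lru;

        c->lru = victim->next;
        if (!c->lru)
                c->tail = &c->lru;
        c->use -= victim->size;
        printf("recycled %zu bytes, now %zu/%zu in use\n",
               victim->size, c->use, c->max);
        free(victim);
}

/* Counterpart of gk20a_instmem_vaddr_gc(): make room for `size` new bytes. */
static void cache_gc(struct map_cache *c, size_t size)
{
        while (c->use + size > c->max && c->lru)
                cache_recycle_one(c);
}

/* Called when an object is released: its mapping is kept for reuse. */
static void cache_release(struct map_cache *c, size_t size)
{
        struct cached_map *m = malloc(sizeof(*m));

        if (!m)
                return;
        m->size = size;
        m->next = NULL;
        *c->tail = m;
        c->tail = &m->next;
        c->use += size;
}

int main(void)
{
        struct map_cache c;

        cache_init(&c, 1 << 20);     /* 1 MiB budget, as in gk20a.c */
        cache_release(&c, 512 << 10);
        cache_release(&c, 384 << 10);
        cache_gc(&c, 256 << 10);     /* evicts the 512 KiB entry */
        return 0;
}

In the driver the same check runs under gk20a_instmem::lock, since acquire and release of different objects can race on the shared vaddr_use counter and LRU list.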
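
gk20a.c also marks addresses that must be resolved through the IOMMU by OR-ing a platform-specific bit into the page-granular offset (r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift)) and clearing it again in the destructor before unmapping. The tiny sketch below shows only that tagging arithmetic; the bit position and page shift are made-up values, whereas the real ones come from the Tegra platform data.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only. */
#define IOMMU_BIT     34        /* bit set in the GPU bus address */
#define IOMMU_PGSHIFT 12        /* 4 KiB pages */

int main(void)
{
        /* nvkm_mm hands back offsets in units of pages, so the tag is
         * shifted down by the page shift before being OR'd in. */
        uint64_t page_offset = 0x1234;                      /* page index */
        uint64_t tag = 1ULL << (IOMMU_BIT - IOMMU_PGSHIFT);

        uint64_t tagged  = page_offset | tag;               /* as in the ctor */
        uint64_t cleared = tagged & ~tag;                   /* as in the dtor */

        printf("tagged  page offset: %#llx\n", (unsigned long long)tagged);
        printf("cleared page offset: %#llx\n", (unsigned long long)cleared);
        printf("byte address seen by GPU: %#llx\n",
               (unsigned long long)(tagged << IOMMU_PGSHIFT));
        return 0;
}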
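
When no BAR2 mapping is available, nv50.c falls back to nv50_instobj_rd32_slow()/wr32_slow(), which reach arbitrary VRAM addresses through the fixed 1 MiB PRAMIN aperture at PRI offset 0x700000: the 1 MiB-aligned part of the address is written (shifted right by 16) into register 0x001700, and the low 20 bits are added to the aperture base. The standalone sketch below models only that address split; struct fake_pri and its counters are invented for the example and no real registers are touched.

#include <stdint.h>
#include <stdio.h>

#define PRAMIN_BASE   0x700000u   /* CPU-visible aperture in PRI space */
#define PRAMIN_WINDOW 0x001700u   /* window-base register used by nv50.c */

/* Hypothetical register-file model: just enough state to show how the
 * slow path steers the 1 MiB window. */
struct fake_pri {
        uint64_t window_base;     /* what 0x001700 currently points at */
        unsigned reprograms;      /* how often the window had to move */
};

static void pri_wr32(struct fake_pri *p, uint32_t reg, uint32_t val)
{
        if (reg == PRAMIN_WINDOW) {
                p->window_base = (uint64_t)val << 16;
                p->reprograms++;
        }
}

/* Split an instance-memory address exactly as nv50_instobj_rd32_slow() does. */
static void slow_access(struct fake_pri *p, uint64_t addr)
{
        uint64_t base   = addr & 0xffffff00000ULL;  /* 1 MiB-aligned window */
        uint32_t offset = addr & 0x000000fffffULL;  /* low 20 bits */

        if (p->window_base != base)
                pri_wr32(p, PRAMIN_WINDOW, (uint32_t)(base >> 16));

        printf("addr %#llx -> window %#llx, PRI %#x\n",
               (unsigned long long)addr, (unsigned long long)base,
               PRAMIN_BASE + offset);
}

int main(void)
{
        struct fake_pri pri = { ~0ULL, 0 };

        slow_access(&pri, 0x20001000);   /* programs the window */
        slow_access(&pri, 0x200ff000);   /* same window, no reprogram */
        slow_access(&pri, 0x20100000);   /* crosses 1 MiB, reprograms */
        printf("window reprogrammed %u times\n", pri.reprograms);
        return 0;
}

Caching the current window base (imem->addr in the driver, guarded by the instmem spinlock) is what lets consecutive accesses inside the same 1 MiB window skip the extra register write.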