Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
168 files changed, 10142 insertions, 782 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 91fb494d40..adc60b25f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -79,8 +79,7 @@ nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
 	int i;
 
 	/* Convert device name to lowercase */
-	strncpy(cname, device->chip->name, sizeof(cname));
-	cname[sizeof(cname) - 1] = '\0';
+	strscpy(cname, device->chip->name, sizeof(cname));
 	i = strlen(cname);
 	while (i) {
 		--i;
@@ -113,6 +112,22 @@ nvkm_firmware_put(const struct firmware *fw)
 
 #define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
 
+static struct scatterlist *
+nvkm_firmware_mem_sgl(struct nvkm_memory *memory)
+{
+	struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
+
+	switch (fw->func->type) {
+	case NVKM_FIRMWARE_IMG_DMA: return &fw->mem.sgl;
+	case NVKM_FIRMWARE_IMG_SGT: return fw->mem.sgt.sgl;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return NULL;
+}
+
 static int
 nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 		      struct nvkm_vma *vma, void *argv, u32 argc)
@@ -121,10 +136,10 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 	struct nvkm_vmm_map map = {
 		.memory = &fw->mem.memory,
 		.offset = offset,
-		.sgl = &fw->mem.sgl,
+		.sgl = nvkm_firmware_mem_sgl(memory),
 	};
 
-	if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA))
+	if (!map.sgl)
 		return -ENOSYS;
 
 	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
@@ -133,12 +148,15 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 static u64
 nvkm_firmware_mem_size(struct nvkm_memory *memory)
 {
-	return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl);
+	struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);
+
+	return sgl ? sg_dma_len(sgl) : 0;
 }
 
 static u64
 nvkm_firmware_mem_addr(struct nvkm_memory *memory)
 {
+	BUG_ON(nvkm_firmware_mem(memory)->func->type != NVKM_FIRMWARE_IMG_DMA);
 	return nvkm_firmware_mem(memory)->phys;
 }
 
@@ -189,6 +207,12 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
 		nvkm_memory_unref(&memory);
 		dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
 		break;
+	case NVKM_FIRMWARE_IMG_SGT:
+		nvkm_memory_unref(&memory);
+		dma_unmap_sgtable(fw->device->dev, &fw->mem.sgt, DMA_TO_DEVICE, 0);
+		sg_free_table(&fw->mem.sgt);
+		vfree(fw->img);
+		break;
 	default:
 		WARN_ON(1);
 		break;
@@ -226,6 +250,49 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
 			sg_dma_len(&fw->mem.sgl) = len;
 		}
 		break;
+	case NVKM_FIRMWARE_IMG_SGT:
+		len = ALIGN(fw->len, PAGE_SIZE);
+
+		fw->img = vmalloc(len);
+		if (fw->img) {
+			int pages = len >> PAGE_SHIFT;
+			int ret = 0;
+
+			memcpy(fw->img, src, fw->len);
+
+			ret = sg_alloc_table(&fw->mem.sgt, pages, GFP_KERNEL);
+			if (ret == 0) {
+				struct scatterlist *sgl;
+				u8 *data = fw->img;
+				int i;
+
+				for_each_sgtable_sg(&fw->mem.sgt, sgl, i) {
+					struct page *page = vmalloc_to_page(data);
+
+					if (!page) {
+						ret = -EFAULT;
+						break;
+					}
+
+					sg_set_page(sgl, page, PAGE_SIZE, 0);
+					data += PAGE_SIZE;
+				}
+
+				if (ret == 0) {
+					ret = dma_map_sgtable(fw->device->dev, &fw->mem.sgt,
+							      DMA_TO_DEVICE, 0);
+				}
+
+				if (ret)
+					sg_free_table(&fw->mem.sgt);
+			}
+
+			if (ret) {
+				vfree(fw->img);
+				fw->img = NULL;
+			}
+		}
+		break;
 	default:
 		WARN_ON(1);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index c69daac9ba..a705c2dfca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -140,12 +140,23 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
 {
 	struct nvkm_instmem *imem = device->imem;
 	struct nvkm_memory *memory;
+	bool preserve = true;
 	int ret;
 
-	if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+	if (unlikely(!imem))
 		return -ENOSYS;
 
-	ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+	switch (target) {
+	case NVKM_MEM_TARGET_INST_SR_LOST:
+		preserve = false;
+		break;
+	case NVKM_MEM_TARGET_INST:
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	ret = nvkm_instobj_new(imem, size, align, zero, preserve, &memory);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index c6dfed18f3..bfaaff645a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,6 +17,8 @@ include $(src)/nvkm/engine/msppp/Kbuild
 include $(src)/nvkm/engine/msvld/Kbuild
 include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
+include $(src)/nvkm/engine/nvjpg/Kbuild
+include $(src)/nvkm/engine/ofa/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
 include $(src)/nvkm/engine/sec2/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 8bf1635ffa..165d61fc5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,3 +10,5 @@ nvkm-y += nvkm/engine/ce/gv100.o
 nvkm-y += nvkm/engine/ce/tu102.o
 nvkm-y += nvkm/engine/ce/ga100.o
 nvkm-y += nvkm/engine/ce/ga102.o
+
+nvkm-y += nvkm/engine/ce/r535.o
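The NVKM_FIRMWARE_IMG_SGT path added to firmware.c above follows the usual kernel recipe for handing a vmalloc() image to a device: one scatterlist entry per page, then a DMA mapping of the whole table. A minimal self-contained sketch of the same steps (illustrative names, error unwinding trimmed; not part of the patch):

#include <linux/vmalloc.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Sketch: map a page-aligned vmalloc() buffer for device reads. */
static int map_vmalloc_for_device(struct device *dev, void *buf, size_t len,
				  struct sg_table *sgt)
{
	int npages = len >> PAGE_SHIFT, i, ret;
	struct scatterlist *sgl;
	u8 *p = buf;

	ret = sg_alloc_table(sgt, npages, GFP_KERNEL);
	if (ret)
		return ret;

	/* vmalloc memory is virtually contiguous but physically scattered,
	 * so each PAGE_SIZE chunk becomes its own scatterlist entry. */
	for_each_sgtable_sg(sgt, sgl, i) {
		sg_set_page(sgl, vmalloc_to_page(p), PAGE_SIZE, 0);
		p += PAGE_SIZE;
	}

	/* DMA_TO_DEVICE: the device only ever reads the image. */
	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		sg_free_table(sgt);
	return ret;
}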
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 315a69f7fd..9427a592bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
 #include <subdev/vfn.h>
 
 #include <nvif/class.h>
@@ -88,5 +89,8 @@ int
 ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+
 	return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index 461b73c7e2..ce56ede7c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -41,5 +43,8 @@ int
 ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+
 	return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 0be72c463b..806a76a722 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -3,6 +3,9 @@
 #define __NVKM_CE_PRIV_H__
 #include <engine/ce.h>
 
+int r535_ce_new(const struct nvkm_engine_func *, struct nvkm_device *,
+		enum nvkm_subdev_type, int, struct nvkm_engine **);
+
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
new file mode 100644
index 0000000000..bd0d435dbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
+
+struct r535_ce_obj {
+	struct nvkm_object object;
+	struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ce_obj_dtor(struct nvkm_object *object)
+{
+	struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
+
+	nvkm_gsp_rm_free(&obj->rm);
+	return obj;
+}
+
+static const struct nvkm_object_func
+r535_ce_obj = {
+	.dtor = r535_ce_obj_dtor,
+};
+
+static int
+r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		 struct nvkm_object **pobject)
+{
+	struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+	struct r535_ce_obj *obj;
+	NVC0B5_ALLOCATION_PARAMETERS *args;
+
+	if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
+	*pobject = &obj->object;
+
+	args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+				     sizeof(*args), &obj->rm);
+	if (WARN_ON(IS_ERR(args)))
+		return PTR_ERR(args);
+
+	args->version = 1;
+	args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
+
+	return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ce_dtor(struct nvkm_engine *engine)
+{
+	kfree(engine->func);
+	return engine;
+}
+
+int
+r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+	    enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+	struct nvkm_engine_func *rm;
+	int nclass, ret;
+
+	for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+	if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+		return -ENOMEM;
+
+	rm->dtor = r535_ce_dtor;
+	for (int i = 0; i < nclass; i++) {
+		rm->sclass[i].minver = hw->sclass[i].minver;
+		rm->sclass[i].maxver = hw->sclass[i].maxver;
+		rm->sclass[i].oclass = hw->sclass[i].oclass;
+		rm->sclass[i].ctor = r535_ce_obj_ctor;
+	}
+
+	ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+	if (ret)
+		kfree(rm);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 9563c01751..7c8647dcb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -37,5 +39,8 @@ int
 tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+
 	return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
 }
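r535_ce_new() above is the template this series uses for GSP-RM-offloaded engines: clone the hardware engine's class list, reroute every class constructor to an RM-backed one, and free the cloned table from the engine destructor. A generic sketch of that shape (names here are illustrative, not nvkm's):

#include <linux/slab.h>
#include <linux/overflow.h>

struct cls { u32 oclass; int (*ctor)(void); };
struct funcs { void (*dtor)(void); struct cls sclass[]; };

static struct funcs *clone_for_rm(const struct funcs *hw,
				  int (*rm_ctor)(void), void (*rm_dtor)(void))
{
	struct funcs *rm;
	int n, i;

	/* Count the zero-terminated class list, then allocate a copy
	 * with room for the terminator. */
	for (n = 0; hw->sclass[n].oclass; n++)
		;
	rm = kzalloc(struct_size(rm, sclass, n + 1), GFP_KERNEL);
	if (!rm)
		return NULL;

	rm->dtor = rm_dtor;	/* frees this kzalloc'd table again */
	for (i = 0; i < n; i++) {
		rm->sclass[i] = hw->sclass[i];	/* keep class IDs/versions */
		rm->sclass[i].ctor = rm_ctor;	/* route alloc through RM */
	}
	return rm;
}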
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 1c81e5b34d..31ed3da32f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2408,7 +2408,7 @@ nv162_chipset = {
	.fb = { 0x00000001, tu102_fb_new },
	.fuse = { 0x00000001, gm107_fuse_new },
	.gpio = { 0x00000001, gk104_gpio_new },
-	.gsp = { 0x00000001, gv100_gsp_new },
+	.gsp = { 0x00000001, tu102_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.ltc = { 0x00000001, gp102_ltc_new },
@@ -2426,8 +2426,8 @@ nv162_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, tu102_fifo_new },
	.gr = { 0x00000001, tu102_gr_new },
-	.nvdec = { 0x00000001, gm107_nvdec_new },
-	.nvenc = { 0x00000001, gm107_nvenc_new },
+	.nvdec = { 0x00000001, tu102_nvdec_new },
+	.nvenc = { 0x00000001, tu102_nvenc_new },
	.sec2 = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2443,7 +2443,7 @@ nv164_chipset = {
	.fb = { 0x00000001, tu102_fb_new },
	.fuse = { 0x00000001, gm107_fuse_new },
	.gpio = { 0x00000001, gk104_gpio_new },
-	.gsp = { 0x00000001, gv100_gsp_new },
+	.gsp = { 0x00000001, tu102_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.ltc = { 0x00000001, gp102_ltc_new },
@@ -2461,8 +2461,8 @@ nv164_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, tu102_fifo_new },
	.gr = { 0x00000001, tu102_gr_new },
-	.nvdec = { 0x00000003, gm107_nvdec_new },
-	.nvenc = { 0x00000001, gm107_nvenc_new },
+	.nvdec = { 0x00000003, tu102_nvdec_new },
+	.nvenc = { 0x00000001, tu102_nvenc_new },
	.sec2 = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2478,7 +2478,7 @@ nv166_chipset = {
	.fb = { 0x00000001, tu102_fb_new },
	.fuse = { 0x00000001, gm107_fuse_new },
	.gpio = { 0x00000001, gk104_gpio_new },
-	.gsp = { 0x00000001, gv100_gsp_new },
+	.gsp = { 0x00000001, tu102_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.ltc = { 0x00000001, gp102_ltc_new },
@@ -2496,8 +2496,8 @@ nv166_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, tu102_fifo_new },
	.gr = { 0x00000001, tu102_gr_new },
-	.nvdec = { 0x00000007, gm107_nvdec_new },
-	.nvenc = { 0x00000001, gm107_nvenc_new },
+	.nvdec = { 0x00000007, tu102_nvdec_new },
+	.nvenc = { 0x00000001, tu102_nvenc_new },
	.sec2 = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2513,7 +2513,7 @@ nv167_chipset = {
	.fb = { 0x00000001, tu102_fb_new },
	.fuse = { 0x00000001, gm107_fuse_new },
	.gpio = { 0x00000001, gk104_gpio_new },
-	.gsp = { 0x00000001, gv100_gsp_new },
+	.gsp = { 0x00000001, tu116_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.ltc = { 0x00000001, gp102_ltc_new },
@@ -2531,8 +2531,8 @@ nv167_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, tu102_fifo_new },
	.gr = { 0x00000001, tu102_gr_new },
-	.nvdec = { 0x00000001, gm107_nvdec_new },
-	.nvenc = { 0x00000001, gm107_nvenc_new },
+	.nvdec = { 0x00000001, tu102_nvdec_new },
+	.nvenc = { 0x00000001, tu102_nvenc_new },
	.sec2 = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2548,7 +2548,7 @@ nv168_chipset = {
	.fb = { 0x00000001, tu102_fb_new },
	.fuse = { 0x00000001, gm107_fuse_new },
	.gpio = { 0x00000001, gk104_gpio_new },
-	.gsp = { 0x00000001, gv100_gsp_new },
+	.gsp = { 0x00000001, tu116_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.ltc = { 0x00000001, gp102_ltc_new },
@@ -2566,8 +2566,8 @@ nv168_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, tu102_fifo_new },
	.gr = { 0x00000001, tu102_gr_new },
-	.nvdec = { 0x00000001, gm107_nvdec_new },
-	.nvenc = { 0x00000001, gm107_nvenc_new },
+	.nvdec = { 0x00000001, tu102_nvdec_new },
+	.nvenc = { 0x00000001, tu102_nvenc_new },
	.sec2 = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2580,6 +2580,7 @@ nv170_chipset = {
	.fault = { 0x00000001, tu102_fault_new },
	.fb = { 0x00000001, ga100_fb_new },
	.gpio = { 0x00000001, gk104_gpio_new },
+	.gsp = { 0x00000001, ga100_gsp_new },
	.i2c = { 0x00000001, gm200_i2c_new },
	.imem = { 0x00000001, nv50_instmem_new },
	.mc = { 0x00000001, ga100_mc_new },
@@ -2591,6 +2592,9 @@ nv170_chipset = {
	.vfn = { 0x00000001, ga100_vfn_new },
	.ce = { 0x000003ff, ga100_ce_new },
	.fifo = { 0x00000001, ga100_fifo_new },
+	.nvdec = { 0x0000001f, ga100_nvdec_new },
+	.nvjpg = { 0x00000001, ga100_nvjpg_new },
+	.ofa = { 0x00000001, ga100_ofa_new },
 };
 
 static const struct nvkm_device_chip
@@ -2619,7 +2623,9 @@ nv172_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, ga102_fifo_new },
	.gr = { 0x00000001, ga102_gr_new },
-	.nvdec = { 0x00000001, ga102_nvdec_new },
+	.nvdec = { 0x00000003, ga102_nvdec_new },
+	.nvenc = { 0x00000001, ga102_nvenc_new },
+	.ofa = { 0x00000001, ga102_ofa_new },
	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2649,7 +2655,9 @@ nv173_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, ga102_fifo_new },
	.gr = { 0x00000001, ga102_gr_new },
-	.nvdec = { 0x00000001, ga102_nvdec_new },
+	.nvdec = { 0x00000003, ga102_nvdec_new },
+	.nvenc = { 0x00000001, ga102_nvenc_new },
+	.ofa = { 0x00000001, ga102_ofa_new },
	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2679,7 +2687,9 @@ nv174_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, ga102_fifo_new },
	.gr = { 0x00000001, ga102_gr_new },
-	.nvdec = { 0x00000001, ga102_nvdec_new },
+	.nvdec = { 0x00000003, ga102_nvdec_new },
+	.nvenc = { 0x00000001, ga102_nvenc_new },
+	.ofa = { 0x00000001, ga102_ofa_new },
	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2709,7 +2719,9 @@ nv176_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, ga102_fifo_new },
	.gr = { 0x00000001, ga102_gr_new },
-	.nvdec = { 0x00000001, ga102_nvdec_new },
+	.nvdec = { 0x00000003, ga102_nvdec_new },
+	.nvenc = { 0x00000001, ga102_nvenc_new },
+	.ofa = { 0x00000001, ga102_ofa_new },
	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2739,7 +2751,139 @@ nv177_chipset = {
	.dma = { 0x00000001, gv100_dma_new },
	.fifo = { 0x00000001, ga102_fifo_new },
	.gr = { 0x00000001, ga102_gr_new },
-	.nvdec = { 0x00000001, ga102_nvdec_new },
+	.nvdec = { 0x00000003, ga102_nvdec_new },
+	.nvenc = { 0x00000001, ga102_nvenc_new },
+	.ofa = { 0x00000001, ga102_ofa_new },
+	.sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv192_chipset = {
+	.name = "AD102",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gsp = { 0x00000001, ad102_gsp_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.ce = { 0x0000001f, ga102_ce_new },
+	.disp = { 0x00000001, ad102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+	.gr = { 0x00000001, ad102_gr_new },
+	.nvdec = { 0x0000000f, ad102_nvdec_new },
+	.nvenc = { 0x00000007, ad102_nvenc_new },
+	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
+	.ofa = { 0x00000001, ad102_ofa_new },
+	.sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv193_chipset = {
+	.name = "AD103",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gsp = { 0x00000001, ad102_gsp_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.ce = { 0x0000001f, ga102_ce_new },
+	.disp = { 0x00000001, ad102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+	.gr = { 0x00000001, ad102_gr_new },
+	.nvdec = { 0x0000000f, ad102_nvdec_new },
+	.nvenc = { 0x00000007, ad102_nvenc_new },
+	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
+	.ofa = { 0x00000001, ad102_ofa_new },
+	.sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv194_chipset = {
+	.name = "AD104",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gsp = { 0x00000001, ad102_gsp_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.ce = { 0x0000001f, ga102_ce_new },
+	.disp = { 0x00000001, ad102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+	.gr = { 0x00000001, ad102_gr_new },
+	.nvdec = { 0x0000000f, ad102_nvdec_new },
+	.nvenc = { 0x00000007, ad102_nvenc_new },
+	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
+	.ofa = { 0x00000001, ad102_ofa_new },
+	.sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv196_chipset = {
+	.name = "AD106",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gsp = { 0x00000001, ad102_gsp_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.ce = { 0x0000001f, ga102_ce_new },
+	.disp = { 0x00000001, ad102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+	.gr = { 0x00000001, ad102_gr_new },
+	.nvdec = { 0x0000000f, ad102_nvdec_new },
+	.nvenc = { 0x00000007, ad102_nvenc_new },
+	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
+	.ofa = { 0x00000001, ad102_ofa_new },
+	.sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv197_chipset = {
+	.name = "AD107",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gsp = { 0x00000001, ad102_gsp_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.ce = { 0x0000001f, ga102_ce_new },
+	.disp = { 0x00000001, ad102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+	.gr = { 0x00000001, ad102_gr_new },
+	.nvdec = { 0x0000000f, ad102_nvdec_new },
+	.nvenc = { 0x00000007, ad102_nvenc_new },
+	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
+	.ofa = { 0x00000001, ad102_ofa_new },
	.sec2 = { 0x00000001, ga102_sec2_new },
 };
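In the chipset tables above, the first field of each entry is a bitmask of engine instances, not a count: 0x0000000f instantiates nvdec 0-3 on AD102, 0x000003ff gives GA100 ten copy engines, and so on. A simplified sketch of how such a mask expands into per-instance constructor calls (nvkm's actual iteration lives in its device constructor; names here are illustrative):

#include <linux/bitops.h>

static void create_instances(u32 mask, int (*ctor)(int inst))
{
	unsigned long bits = mask;
	int inst;

	/* One engine object per set bit; the bit position is the
	 * instance number passed to the constructor. */
	for_each_set_bit(inst, &bits, 32)
		ctor(inst);
}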
@@ -3061,6 +3205,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
		case 0x140: device->card_type = GV100; break;
		case 0x160: device->card_type = TU100; break;
		case 0x170: device->card_type = GA100; break;
+		case 0x190: device->card_type = AD100; break;
		default:
			break;
		}
@@ -3163,6 +3308,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
		case 0x174: device->chip = &nv174_chipset; break;
		case 0x176: device->chip = &nv176_chipset; break;
		case 0x177: device->chip = &nv177_chipset; break;
+		case 0x192: device->chip = &nv192_chipset; break;
+		case 0x193: device->chip = &nv193_chipset; break;
+		case 0x194: device->chip = &nv194_chipset; break;
+		case 0x196: device->chip = &nv196_chipset; break;
+		case 0x197: device->chip = &nv197_chipset; break;
		default:
			if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
				switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 24faaac158..bf3176bec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -43,6 +43,8 @@
 #include <engine/msvld.h>
 #include <engine/nvenc.h>
 #include <engine/nvdec.h>
+#include <engine/nvjpg.h>
+#include <engine/ofa.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
 #include <engine/sec2.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 9b39ec3416..7fd4800a87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -147,6 +147,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
	case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
	case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
	case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
+	case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
	default:
		args->v0.family = 0;
		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e1aecd3fe9..e346e924fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,6 +27,9 @@ nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/ga102.o
+nvkm-y += nvkm/engine/disp/ad102.o
+
+nvkm-y += nvkm/engine/disp/r535.o
 
 nvkm-y += nvkm/engine/disp/udisp.o
 nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
new file mode 100644
index 0000000000..7f300a79aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_disp_func
+ad102_disp = {
+	.uevent = &gv100_disp_chan_uevent,
+	.ramht_size = 0x2000,
+	.root = {  0, 0,AD102_DISP },
+	.user = {
+		{{-1,-1,GV100_DISP_CAPS                  }, gv100_disp_caps_new },
+		{{ 0, 0,GA102_DISP_CURSOR                }, nvkm_disp_chan_new, &gv100_disp_curs },
+		{{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
+		{{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA      }, nvkm_disp_core_new, &gv100_disp_core },
+		{{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA    }, nvkm_disp_wndw_new, &gv100_disp_wndw },
+		{}
+	},
+};
+
+int
+ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	       struct nvkm_disp **pdisp)
+{
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
+
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 73104b59f9..b24eb1e560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -23,15 +23,12 @@
  */
 #include "priv.h"
 #include "conn.h"
-#include "dp.h"
 #include "head.h"
 #include "ior.h"
 #include "outp.h"
 
 #include <core/client.h>
 #include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
 
 #include <nvif/class.h>
 #include <nvif/cl0046.h>
@@ -105,18 +102,14 @@ static int
 nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
 {
	struct nvkm_disp *disp = nvkm_disp(engine);
-	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
 
	if (disp->func->fini)
-		disp->func->fini(disp);
+		disp->func->fini(disp, suspend);
 
	list_for_each_entry(outp, &disp->outps, head) {
-		nvkm_outp_fini(outp);
-	}
-
-	list_for_each_entry(conn, &disp->conns, head) {
-		nvkm_conn_fini(conn);
+		if (outp->func->fini)
+			outp->func->fini(outp);
	}
 
	return 0;
@@ -126,16 +119,12 @@ static int
 nvkm_disp_init(struct nvkm_engine *engine)
 {
	struct nvkm_disp *disp = nvkm_disp(engine);
-	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;
 
-	list_for_each_entry(conn, &disp->conns, head) {
-		nvkm_conn_init(conn);
-	}
-
	list_for_each_entry(outp, &disp->outps, head) {
-		nvkm_outp_init(outp);
+		if (outp->func->init)
+			outp->func->init(outp);
	}
 
	if (disp->func->init) {
@@ -148,7 +137,8 @@ nvkm_disp_init(struct nvkm_engine *engine)
	 * each output resource to 'fully enabled'.
	 */
	list_for_each_entry(ior, &disp->iors, head) {
-		ior->func->power(ior, true, true, true, true, true);
+		if (ior->func->power)
+			ior->func->power(ior, true, true, true, true, true);
	}
 
	return 0;
@@ -159,142 +149,15 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 {
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
-	struct nvkm_bios *bios = subdev->device->bios;
-	struct nvkm_outp *outp, *outt, *pair;
-	struct nvkm_conn *conn;
	struct nvkm_head *head;
-	struct nvkm_ior *ior;
-	struct nvbios_connE connE;
-	struct dcb_output dcbE;
-	u8 hpd = 0, ver, hdr;
-	u32 data;
	int ret, i;
 
-	/* Create output path objects for each VBIOS display path. */
-	i = -1;
-	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
-		if (ver < 0x40) /* No support for chipsets prior to NV50. */
-			break;
-		if (dcbE.type == DCB_OUTPUT_UNUSED)
-			continue;
-		if (dcbE.type == DCB_OUTPUT_EOL)
-			break;
-		outp = NULL;
-
-		switch (dcbE.type) {
-		case DCB_OUTPUT_ANALOG:
-		case DCB_OUTPUT_TV:
-		case DCB_OUTPUT_TMDS:
-		case DCB_OUTPUT_LVDS:
-			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
-			break;
-		case DCB_OUTPUT_DP:
-			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
-			break;
-		case DCB_OUTPUT_WFD:
-			/* No support for WFD yet. */
-			ret = -ENODEV;
-			continue;
-		default:
-			nvkm_warn(subdev, "dcb %d type %d unknown\n",
-				  i, dcbE.type);
-			continue;
-		}
-
-		if (ret) {
-			if (outp) {
-				if (ret != -ENODEV)
-					OUTP_ERR(outp, "ctor failed: %d", ret);
-				else
-					OUTP_DBG(outp, "not supported");
-				nvkm_outp_del(&outp);
-				continue;
-			}
-			nvkm_error(subdev, "failed to create outp %d\n", i);
-			continue;
-		}
-
-		list_add_tail(&outp->head, &disp->outps);
-		hpd = max(hpd, (u8)(dcbE.connector + 1));
-	}
-
-	/* Create connector objects based on available output paths. */
-	list_for_each_entry_safe(outp, outt, &disp->outps, head) {
-		/* VBIOS data *should* give us the most useful information. */
-		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
-				     &connE);
-
-		/* No bios connector data... */
-		if (!data) {
-			/* Heuristic: anything with the same ccb index is
-			 * considered to be on the same connector, any
-			 * output path without an associated ccb entry will
-			 * be put on its own connector.
-			 */
-			int ccb_index = outp->info.i2c_index;
-			if (ccb_index != 0xf) {
-				list_for_each_entry(pair, &disp->outps, head) {
-					if (pair->info.i2c_index == ccb_index) {
-						outp->conn = pair->conn;
-						break;
-					}
-				}
-			}
-
-			/* Connector shared with another output path. */
-			if (outp->conn)
-				continue;
-
-			memset(&connE, 0x00, sizeof(connE));
-			connE.type = DCB_CONNECTOR_NONE;
-			i = -1;
-		} else {
-			i = outp->info.connector;
-		}
-
-		/* Check that we haven't already created this connector. */
-		list_for_each_entry(conn, &disp->conns, head) {
-			if (conn->index == outp->info.connector) {
-				outp->conn = conn;
-				break;
-			}
-		}
-
-		if (outp->conn)
-			continue;
-
-		/* Apparently we need to create a new one! */
-		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
-		if (ret) {
-			nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
-			nvkm_conn_del(&outp->conn);
-			list_del(&outp->head);
-			nvkm_outp_del(&outp);
-			continue;
-		}
-
-		list_add_tail(&outp->conn->head, &disp->conns);
-	}
-
	if (disp->func->oneinit) {
		ret = disp->func->oneinit(disp);
		if (ret)
			return ret;
	}
 
-	/* Enforce identity-mapped SOR assignment for panels, which have
-	 * certain bits (ie. backlight controls) wired to a specific SOR.
-	 */
-	list_for_each_entry(outp, &disp->outps, head) {
-		if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
-		    outp->conn->info.type == DCB_CONNECTOR_eDP) {
-			ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
-			if (!WARN_ON(!ior))
-				ior->identity = true;
-			outp->identity = true;
-		}
-	}
-
	i = 0;
	list_for_each_entry(head, &disp->heads, head)
		i = max(i, head->id + 1);
@@ -346,6 +209,9 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
		nvkm_head_del(&head);
	}
 
+	if (disp->func && disp->func->dtor)
+		disp->func->dtor(disp);
+
	return data;
 }
 
@@ -377,8 +243,10 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	spin_lock_init(&disp->client.lock);
 
	ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
-	if (ret)
+	if (ret) {
+		disp->func = NULL;
		return ret;
+	}
 
	if (func->super) {
		disp->super.wq = create_singlethread_workqueue("nvkm-disp");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
index 398336ffb6..0202905101 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
@@ -22,6 +22,10 @@ struct nvkm_disp_chan {
 
	u64 push;
	u32 suspend_put;
+
+	struct {
+		struct nvkm_gsp_object object;
+	} rm;
 };
 
 int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index fbdae11378..ff88a5a525 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -30,16 +30,6 @@
 #include <nvif/event.h>
 
 void
-nvkm_conn_fini(struct nvkm_conn *conn)
-{
-}
-
-void
-nvkm_conn_init(struct nvkm_conn *conn)
-{
-}
-
-void
 nvkm_conn_del(struct nvkm_conn **pconn)
 {
	struct nvkm_conn *conn = *pconn;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index a0600e72b0..01c3146c70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -19,8 +19,6 @@ struct nvkm_conn {
 int nvkm_conn_new(struct nvkm_disp *, int index, struct nvbios_connE *,
		   struct nvkm_conn **);
 void nvkm_conn_del(struct nvkm_conn **);
-void nvkm_conn_init(struct nvkm_conn *);
-void nvkm_conn_fini(struct nvkm_conn *);
 
 #define CONN_MSG(c,l,f,a...) do {                                              \
	struct nvkm_conn *_conn = (c);                                         \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index b8ac66b4a2..a109348bd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -41,6 +41,40 @@
  */
 #define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
 
+static int
+nvkm_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+	return 0;
+}
+
+static int
+nvkm_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+	*pid = BIT(outp->index);
+	return 0;
+}
+
+static int
+nvkm_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *size)
+{
+	int ret = nvkm_i2c_aux_acquire(outp->dp.aux);
+
+	if (ret)
+		return ret;
+
+	ret = nvkm_i2c_aux_xfer(outp->dp.aux, false, type, addr, data, size);
+	nvkm_i2c_aux_release(outp->dp.aux);
+	return ret;
+}
+
+static int
+nvkm_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+	outp->dp.enabled = pu;
+	nvkm_dp_enable(outp, outp->dp.enabled);
+	return 0;
+}
+
 struct lt_state {
	struct nvkm_outp *outp;
@@ -282,31 +316,20 @@ nvkm_dp_train_link(struct nvkm_outp *outp, int rate)
	struct lt_state lt = {
		.outp = outp,
		.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED,
+		.repeaters = outp->dp.lttprs,
	};
-	u8 sink[2], data;
+	u8 sink[2];
	int ret;
 
	OUTP_DBG(outp, "training %dx%02x", ior->dp.nr, ior->dp.bw);
 
-	/* Select LTTPR non-transparent mode if we have a valid configuration,
-	 * use transparent mode otherwise.
-	 */
-	if (outp->dp.lttpr[0] >= 0x14) {
-		data = DPCD_LTTPR_MODE_TRANSPARENT;
-		nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
-
-		if (outp->dp.lttprs) {
-			data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
-			nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
-			lt.repeaters = outp->dp.lttprs;
-		}
-	}
-
	/* Set desired link configuration on the sink. */
	sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+	if (outp->dp.lt.post_adj)
+		sink[1] |= 0x20;
 
	ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
@@ -447,71 +470,58 @@ nvkm_dp_train_init(struct nvkm_outp *outp)
 }
 
 static int
-nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
+nvkm_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+	struct lt_state lt = {
+		.outp = outp,
+		.stat[4] = (pe[0] << 2) | (vs[0] << 0) |
+			   (pe[1] << 6) | (vs[1] << 4),
+		.stat[5] = (pe[2] << 2) | (vs[2] << 0) |
+			   (pe[3] << 6) | (vs[3] << 4),
+	};
+
+	return nvkm_dp_train_drive(&lt, false);
+}
+
+static int
+nvkm_dp_train(struct nvkm_outp *outp, bool retrain)
 {
	struct nvkm_ior *ior = outp->ior;
-	int ret = -EINVAL, nr, rate;
-	u8 pwr;
+	int ret, rate;
 
-	/* Retraining link?  Skip source configuration, it can mess up the active modeset. */
-	if (atomic_read(&outp->dp.lt.done)) {
-		for (rate = 0; rate < outp->dp.rates; rate++) {
-			if (outp->dp.rate[rate].rate == ior->dp.bw * 27000)
-				return nvkm_dp_train_link(outp, ret);
-		}
-		WARN_ON(1);
-		return -EINVAL;
+	for (rate = 0; rate < outp->dp.rates; rate++) {
+		if (outp->dp.rate[rate].rate == (retrain ? ior->dp.bw : outp->dp.lt.bw) * 27000)
+			break;
	}
 
-	/* Ensure sink is not in a low-power state. */
-	if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
-		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
-			pwr &= ~DPCD_SC00_SET_POWER;
-			pwr |= DPCD_SC00_SET_POWER_D0;
-			nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
-		}
+	if (WARN_ON(rate == outp->dp.rates))
+		return -EINVAL;
+
+	/* Retraining link?  Skip source configuration, it can mess up the active modeset. */
+	if (retrain) {
+		mutex_lock(&outp->dp.mutex);
+		ret = nvkm_dp_train_link(outp, rate);
+		mutex_unlock(&outp->dp.mutex);
+		return ret;
	}
 
+	mutex_lock(&outp->dp.mutex);
+	OUTP_DBG(outp, "training");
+
+	ior->dp.mst = outp->dp.lt.mst;
	ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
-	ior->dp.nr = 0;
+	ior->dp.bw = outp->dp.lt.bw;
+	ior->dp.nr = outp->dp.lt.nr;
 
-	/* Link training. */
-	OUTP_DBG(outp, "training");
	nvkm_dp_train_init(outp);
-
-	/* Validate and train at configuration requested (if any) on ACQUIRE. */
-	if (outp->dp.lt.nr) {
-		for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
-			for (rate = 0; nr == outp->dp.lt.nr && rate < outp->dp.rates; rate++) {
-				if (outp->dp.rate[rate].rate / 27000 == outp->dp.lt.bw) {
-					ior->dp.bw = outp->dp.rate[rate].rate / 27000;
-					ior->dp.nr = nr;
-					ret = nvkm_dp_train_links(outp, rate);
-				}
-			}
-		}
-	}
-
-	/* Otherwise, loop through all valid link configurations that support the data rate. */
-	for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
-		for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
-			if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
-				/* Program selected link configuration. */
-				ior->dp.bw = outp->dp.rate[rate].rate / 27000;
-				ior->dp.nr = nr;
-				ret = nvkm_dp_train_links(outp, rate);
-			}
-		}
-	}
-
-	/* Finish up. */
+	ret = nvkm_dp_train_links(outp, rate);
	nvkm_dp_train_fini(outp);
	if (ret < 0)
		OUTP_ERR(outp, "training failed");
	else
		OUTP_DBG(outp, "training done");
-	atomic_set(&outp->dp.lt.done, 1);
+
+	mutex_unlock(&outp->dp.mutex);
	return ret;
 }
 
@@ -529,155 +539,10 @@ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 static void
 nvkm_dp_release(struct nvkm_outp *outp)
 {
-	/* Prevent link from being retrained if sink sends an IRQ. */
-	atomic_set(&outp->dp.lt.done, 0);
	outp->ior->dp.nr = 0;
-}
-
-static int
-nvkm_dp_acquire(struct nvkm_outp *outp)
-{
-	struct nvkm_ior *ior = outp->ior;
-	struct nvkm_head *head;
-	bool retrain = true;
-	u32 datakbps = 0;
-	u32 dataKBps;
-	u32 linkKBps;
-	u8 stat[3];
-	int ret, i;
-
-	mutex_lock(&outp->dp.mutex);
-
-	/* Check that link configuration meets current requirements. */
-	list_for_each_entry(head, &outp->disp->heads, head) {
-		if (ior->asy.head & (1 << head->id)) {
-			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
-			datakbps += khz * head->asy.or.depth;
-		}
-	}
-
-	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
-	dataKBps = DIV_ROUND_UP(datakbps, 8);
-	OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
-		 dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
-	if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
-		OUTP_DBG(outp, "link requirements changed");
-		goto done;
-	}
-
-	/* Check that link is still trained. */
-	ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
-	if (ret) {
-		OUTP_DBG(outp, "failed to read link status, assuming no sink");
-		goto done;
-	}
-
-	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
-		for (i = 0; i < ior->dp.nr; i++) {
-			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
-			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
-			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
-			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
-				OUTP_DBG(outp, "lane %d not equalised", lane);
-				goto done;
-			}
-		}
-		retrain = false;
-	} else {
-		OUTP_DBG(outp, "no inter-lane alignment");
-	}
-
-done:
-	if (retrain || !atomic_read(&outp->dp.lt.done))
-		ret = nvkm_dp_train(outp, dataKBps);
-	mutex_unlock(&outp->dp.mutex);
-	return ret;
-}
-
-static bool
-nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
-{
-	u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
-	int i, j, k;
-
-	if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
-	    outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
-	    nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
-		       sink_rates, sizeof(sink_rates)))
-		return false;
-
-	for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
-		const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
-
-		if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
-			break;
-
-		if (rate > outp->info.dpconf.link_bw * 27000) {
-			OUTP_DBG(outp, "rate %d !outp", rate);
-			continue;
-		}
-
-		for (j = 0; j < outp->dp.rates; j++) {
-			if (rate > outp->dp.rate[j].rate) {
-				for (k = outp->dp.rates; k > j; k--)
-					outp->dp.rate[k] = outp->dp.rate[k - 1];
-				break;
-			}
-		}
-
-		outp->dp.rate[j].dpcd = i / 2;
-		outp->dp.rate[j].rate = rate;
-		outp->dp.rates++;
-	}
-
-	for (i = 0; i < outp->dp.rates; i++)
-		OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);
+	nvkm_dp_disable(outp, outp->ior);
 
-	return outp->dp.rates != 0;
-}
-
-/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
- * converted to work inside nvkm. This is a temporary holdover until we start
- * passing the drm_dp_aux device through NVKM
- */
-static int
-nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
-{
-	struct nvkm_i2c_aux *aux = outp->dp.aux;
-	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
-	int ret;
-
-	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * Prior to DP1.3 the bit represented by
-	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
-	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
-	 * the true capability of the panel. The only way to check is to
-	 * then compare 0000h and 2200h.
-	 */
-	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
-	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
-		return 0;
-
-	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
-	if (ret < 0)
-		return ret;
-
-	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
-		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
-			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
-		return 0;
-	}
-
-	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
-		return 0;
-
-	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
-
-	return 0;
+	nvkm_outp_release(outp);
 }
 
 void
@@ -711,66 +576,11 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
		OUTP_DBG(outp, "aux power -> always");
		nvkm_i2c_aux_monitor(aux, true);
		outp->dp.aux_pwr = true;
-
-		/* Detect any LTTPRs before reading DPCD receiver caps. */
-		if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
-		    outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
-			switch (outp->dp.lttpr[2]) {
-			case 0x80: outp->dp.lttprs = 1; break;
-			case 0x40: outp->dp.lttprs = 2; break;
-			case 0x20: outp->dp.lttprs = 3; break;
-			case 0x10: outp->dp.lttprs = 4; break;
-			case 0x08: outp->dp.lttprs = 5; break;
-			case 0x04: outp->dp.lttprs = 6; break;
-			case 0x02: outp->dp.lttprs = 7; break;
-			case 0x01: outp->dp.lttprs = 8; break;
-			default:
-				/* Unknown LTTPR count, we'll switch to transparent mode. */
-				WARN_ON(1);
-				outp->dp.lttprs = 0;
-				break;
-			}
-		} else {
-			/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
-			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
-		}
-
-		if (!nvkm_dp_read_dpcd_caps(outp)) {
-			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
-			const u8 *rate;
-			int rate_max;
-
-			outp->dp.rates = 0;
-			outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
-			outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
-			if (outp->dp.lttprs && outp->dp.lttpr[4])
-				outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);
-
-			rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
-			rate_max = min(rate_max, outp->info.dpconf.link_bw);
-			if (outp->dp.lttprs && outp->dp.lttpr[1])
-				rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);
-
-			if (!nvkm_dp_enable_supported_link_rates(outp)) {
-				for (rate = rates; *rate; rate++) {
-					if (*rate > rate_max)
-						continue;
-
-					if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
-						break;
-
-					outp->dp.rate[outp->dp.rates].dpcd = -1;
-					outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
-					outp->dp.rates++;
-				}
-			}
-		}
	} else
	if (!auxpwr && outp->dp.aux_pwr) {
		OUTP_DBG(outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		outp->dp.aux_pwr = false;
-		atomic_set(&outp->dp.lt.done, 0);
 
		/* Restore eDP panel GPIO to its prior state if we changed it, as
		 * it could potentially interfere with other outputs.
@@ -793,6 +603,7 @@ nvkm_dp_fini(struct nvkm_outp *outp)
 static void
 nvkm_dp_init(struct nvkm_outp *outp)
 {
+	nvkm_outp_init(outp);
	nvkm_dp_enable(outp, outp->dp.enabled);
 }
 
@@ -807,9 +618,18 @@ nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
-	.acquire = nvkm_dp_acquire,
+	.detect = nvkm_outp_detect,
+	.inherit = nvkm_outp_inherit,
+	.acquire = nvkm_outp_acquire,
	.release = nvkm_dp_release,
-	.disable = nvkm_dp_disable,
+	.bl.get = nvkm_outp_bl_get,
+	.bl.set = nvkm_outp_bl_set,
+	.dp.aux_pwr = nvkm_dp_aux_pwr,
+	.dp.aux_xfer = nvkm_dp_aux_xfer,
+	.dp.train = nvkm_dp_train,
+	.dp.drive = nvkm_dp_drive,
+	.dp.mst_id_get = nvkm_dp_mst_id_get,
+	.dp.mst_id_put = nvkm_dp_mst_id_put,
 };
 
 int
@@ -819,7 +639,7 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	struct nvkm_outp *outp;
-	u8 hdr, cnt, len;
+	u8 ver, hdr, cnt, len;
	u32 data;
	int ret;
 
@@ -847,7 +667,9 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
	OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);
 
+	data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+	outp->dp.mst = data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+
+	mutex_init(&outp->dp.mutex);
-	atomic_set(&outp->dp.lt.done, 0);
	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 23ae451ba4..1be97a68a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -124,6 +124,7 @@ g84_sor = {
	.state = nv50_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
+	.bl = &nv50_sor_bl,
	.hdmi = &g84_sor_hdmi,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 67ef889a0c..843a2661ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -295,6 +295,7 @@ g94_sor = {
	.clock = nv50_sor_clock,
	.war_2 = g94_sor_war_2,
	.war_3 = g94_sor_war_3,
+	.bl = &nv50_sor_bl,
	.hdmi = &g84_sor_hdmi,
	.dp = &g94_sor_dp,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
index 52099b75f5..ab0a85c924 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -24,6 +24,7 @@
 #include "head.h"
 #include "ior.h"
 
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -105,6 +106,7 @@ ga102_sor = {
	.state = gv100_sor_state,
	.power = nv50_sor_power,
	.clock = ga102_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gv100_sor_hdmi,
	.dp = &ga102_sor_dp,
	.hda = &gv100_sor_hda,
@@ -146,5 +148,8 @@ int
 ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_disp **pdisp)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_disp_new(&ga102_disp, device, type, inst, pdisp);
+
	return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
 }
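The new nvkm_dp_drive() above hands per-lane voltage-swing/pre-emphasis requests to the existing link-training drive helper by packing them in DPCD ADJUST_REQUEST layout, two lanes per byte. A standalone sketch of that packing (register layout per the DisplayPort spec, DPCD 0x206/0x207; not part of the patch):

/* Sketch: voltage swing sits in bits 1:0 and 5:4, pre-emphasis in bits
 * 3:2 and 7:6 -- mirroring the stat[4]/stat[5] initialisers above. */
static void pack_adjust_request(const u8 vs[4], const u8 pe[4], u8 out[2])
{
	out[0] = (vs[0] << 0) | (pe[0] << 2) | (vs[1] << 4) | (pe[1] << 6);
	out[1] = (vs[2] << 0) | (pe[2] << 2) | (vs[3] << 4) | (pe[3] << 6);
}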
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index a48e9bdf4c..83a1323600 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -328,6 +328,7 @@ gf119_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gf119_sor_hdmi,
	.dp = &gf119_sor_dp,
	.hda = &gf119_sor_hda,
@@ -1038,7 +1039,6 @@ gf119_disp_super(struct work_struct *work)
			continue;
		nv50_disp_super_2_0(disp, head);
	}
-	nvkm_outp_route(disp);
	list_for_each_entry(head, &disp->heads, head) {
		if (!(mask[head->id] & 0x00010000))
			continue;
@@ -1154,7 +1154,7 @@ gf119_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gf119_disp_fini(struct nvkm_disp *disp)
+gf119_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
	struct nvkm_device *device = disp->engine.subdev.device;
	/* disable all interrupts */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 876a21a0ce..a3e2fbadad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -115,6 +115,7 @@ gk104_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gk104_sor_hdmi,
	.dp = &gf119_sor_dp,
	.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b4d8e86861..688e123ad4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -70,6 +70,7 @@ gm107_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gk104_sor_hdmi,
	.dp = &gm107_sor_dp,
	.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 562ebae57d..511e7831b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -68,15 +68,23 @@ gm200_sor_dp = {
 };
 
 void
-gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
+gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u32 khz, bool support, bool scrambling,
+		    bool scrambling_low_rates)
 {
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(ior);
-	const u32 ctrl = scdc & 0x3;
+	u32 ctrl = 0;
 
-	nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
+	ior->tmds.high_speed = khz > 340000;
+
+	if (support && scrambling) {
+		if (ior->tmds.high_speed)
+			ctrl |= 0x00000002;
+		if (ior->tmds.high_speed || scrambling_low_rates)
+			ctrl |= 0x00000001;
+	}
 
-	ior->tmds.high_speed = !!(scdc & 0x2);
+	nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
 }
 
 const struct nvkm_ior_func_hdmi
@@ -139,6 +147,7 @@ gm200_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gm200_sor_hdmi,
	.dp = &gm200_sor_dp,
	.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 7f1eb43320..4070447bd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -37,6 +37,7 @@ gp100_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gm200_sor_hdmi,
	.dp = &gm200_sor_dp,
	.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 506ffbe7b8..6318721b66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -182,11 +182,49 @@ gt215_sor_hdmi = {
	.infoframe_vsi = gt215_sor_hdmi_infoframe_vsi,
 };
 
+static int
+gt215_sor_bl_set(struct nvkm_ior *ior, int lvl)
+{
+	struct nvkm_device *device = ior->disp->engine.subdev.device;
+	const u32 soff = nv50_ior_base(ior);
+	u32 div, val;
+
+	div = nvkm_rd32(device, 0x61c080 + soff);
+	val = (lvl * div) / 100;
+	if (div)
+		nvkm_wr32(device, 0x61c084 + soff, 0xc0000000 | val);
+
+	return 0;
+}
+
+static int
+gt215_sor_bl_get(struct nvkm_ior *ior)
+{
+	struct nvkm_device *device = ior->disp->engine.subdev.device;
+	const u32 soff = nv50_ior_base(ior);
+	u32 div, val;
+
+	div = nvkm_rd32(device, 0x61c080 + soff);
+	val = nvkm_rd32(device, 0x61c084 + soff);
+	val &= 0x00ffffff;
+	if (div && div >= val)
+		return ((val * 100) + (div / 2)) / div;
+
+	return 100;
+}
+
+const struct nvkm_ior_func_bl
+gt215_sor_bl = {
+	.get = gt215_sor_bl_get,
+	.set = gt215_sor_bl_set,
+};
+
 static const struct nvkm_ior_func
 gt215_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gt215_sor_hdmi,
	.dp = &gt215_sor_dp,
	.hda = &gt215_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 4ebc030e40..cfa3698d3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -96,7 +96,7 @@ gv100_sor_dp = {
	.watermark = gv100_sor_dp_watermark,
 };
 
-static void
+void
 gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
	struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -120,7 +120,7 @@ gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
	nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
 }
 
-static void
+void
 gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
	struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -212,6 +212,7 @@ gv100_sor = {
	.state = gv100_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
+	.bl = &gt215_sor_bl,
	.hdmi = &gv100_sor_hdmi,
	.dp = &gv100_sor_dp,
	.hda = &gv100_sor_hda,
@@ -863,7 +864,6 @@ gv100_disp_super(struct work_struct *work)
			continue;
		nv50_disp_super_2_0(disp, head);
	}
-	nvkm_outp_route(disp);
	list_for_each_entry(head, &disp->heads, head) {
		if (!(mask[head->id] & 0x00010000))
			continue;
@@ -1115,7 +1115,7 @@ gv100_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gv100_disp_fini(struct nvkm_disp *disp)
+gv100_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_wr32(device, 0x611db0, 0x00000000);
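The gt215 backlight hooks above convert between a 0-100 percentage and a PWM duty value: set() scales the percentage by the period read from the divider register, and get() rounds half-up so a round trip returns the original level (the nv50 variant in nv50.c further down is the same math with a fixed period of 1025). The conversion, isolated as a sketch:

/* Sketch of the percentage <-> PWM-duty math in gt215_sor_bl above.
 * 'div' is the PWM period read from the hardware; 'duty' the compare
 * value.  Rounding by +div/2 keeps pct_from(duty_from(x)) == x. */
static u32 bl_duty_from_pct(u32 pct, u32 div)
{
	return (pct * div) / 100;
}

static u32 bl_pct_from_duty(u32 duty, u32 div)
{
	return div ? ((duty * 100) + (div / 2)) / div : 100;
}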
(*drive)(struct nvkm_ior *, int ln, int pc, int dc, int pe, int tx_pu); + int (*sst)(struct nvkm_ior *, int head, bool ef, + u32 watermark, u32 hblanksym, u32 vblanksym); void (*vcpi)(struct nvkm_ior *, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned); void (*audio)(struct nvkm_ior *, int head, bool enable); @@ -122,6 +131,7 @@ int nv50_sor_cnt(struct nvkm_disp *, unsigned long *); void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *); void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool); void nv50_sor_clock(struct nvkm_ior *); +extern const struct nvkm_ior_func_bl nv50_sor_bl; int g84_sor_new(struct nvkm_disp *, int); extern const struct nvkm_ior_func_hdmi g84_sor_hdmi; @@ -138,6 +148,7 @@ void g94_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32); void g94_sor_dp_activesym(struct nvkm_ior *, int, u8, u8, u8, u8); void g94_sor_dp_watermark(struct nvkm_ior *, int, u8); +extern const struct nvkm_ior_func_bl gt215_sor_bl; extern const struct nvkm_ior_func_hdmi gt215_sor_hdmi; void gt215_sor_dp_audio(struct nvkm_ior *, int, bool); extern const struct nvkm_ior_func_hda gt215_sor_hda; @@ -167,7 +178,7 @@ void gm107_sor_dp_pattern(struct nvkm_ior *, int); void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *); int gm200_sor_route_get(struct nvkm_outp *, int *); extern const struct nvkm_ior_func_hdmi gm200_sor_hdmi; -void gm200_sor_hdmi_scdc(struct nvkm_ior *, u8); +void gm200_sor_hdmi_scdc(struct nvkm_ior *, u32, bool, bool, bool); extern const struct nvkm_ior_func_dp gm200_sor_dp; void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int); @@ -176,6 +187,8 @@ int gp100_sor_new(struct nvkm_disp *, int); int gv100_sor_cnt(struct nvkm_disp *, unsigned long *); void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *); extern const struct nvkm_ior_func_hdmi gv100_sor_hdmi; +void gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *, int, void *, u32); +void gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *, int, void *, u32); void gv100_sor_dp_audio(struct nvkm_ior *, int, bool); void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32); void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c index f96ba47526..e0c5fb6df3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c @@ -44,6 +44,7 @@ mcp89_sor = { .state = g94_sor_state, .power = nv50_sor_power, .clock = nv50_sor_clock, + .bl = &gt215_sor_bl, .hdmi = &gt215_sor_hdmi, .dp = &mcp89_sor_dp, .hda = &gt215_sor_hda, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index be81168029..03a5f88a4b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -23,7 +23,9 @@ */ #include "priv.h" #include "chan.h" +#include "conn.h" #include "head.h" +#include "dp.h" #include "ior.h" #include "outp.h" @@ -156,6 +158,37 @@ nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask) return 3; } +static int +nv50_sor_bl_set(struct nvkm_ior *ior, int lvl) +{ + struct nvkm_device *device = ior->disp->engine.subdev.device; + const u32 soff = nv50_ior_base(ior); + u32 div = 1025; + u32 val = (lvl * div) / 100; + + nvkm_wr32(device, 0x61c084 + soff, 0x80000000 | val); + return 0; +} + +static int +nv50_sor_bl_get(struct nvkm_ior *ior) +{ + struct nvkm_device *device = ior->disp->engine.subdev.device; + const u32 soff = 
nv50_ior_base(ior); + u32 div = 1025; + u32 val; + + val = nvkm_rd32(device, 0x61c084 + soff); + val &= 0x000007ff; + return ((val * 100) + (div / 2)) / div; +} + +const struct nvkm_ior_func_bl +nv50_sor_bl = { + .get = nv50_sor_bl_get, + .set = nv50_sor_bl_set, +}; + void nv50_sor_clock(struct nvkm_ior *sor) { @@ -220,6 +253,7 @@ nv50_sor = { .state = nv50_sor_state, .power = nv50_sor_power, .clock = nv50_sor_clock, + .bl = &nv50_sor_bl, }; static int @@ -1254,10 +1288,6 @@ nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head) ior->asy.link = outp->lvds.dual ? 3 : 1; } - /* Handle any link training, etc. */ - if (outp && outp->func->acquire) - outp->func->acquire(outp); - /* Execute OnInt2 IED script. */ nv50_disp_super_ied_on(head, ior, 0, khz); @@ -1287,7 +1317,6 @@ nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head) void nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head) { - struct nvkm_outp *outp; struct nvkm_ior *ior; /* Determine which OR, if any, we're detaching from the head. */ @@ -1298,14 +1327,6 @@ nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head) /* Execute OffInt2 IED script. */ nv50_disp_super_ied_off(head, ior, 2); - - /* If we're shutting down the OR's only active head, execute - * the output path's disable function. - */ - if (ior->arm.head == (1 << head->id)) { - if ((outp = ior->arm.outp) && outp->func->disable) - outp->func->disable(outp, ior); - } } void @@ -1371,7 +1392,6 @@ nv50_disp_super(struct work_struct *work) continue; nv50_disp_super_2_0(disp, head); } - nvkm_outp_route(disp); list_for_each_entry(head, &disp->heads, head) { if (!(super & (0x00000200 << head->id))) continue; @@ -1484,7 +1504,7 @@ nv50_disp_intr(struct nvkm_disp *disp) } void -nv50_disp_fini(struct nvkm_disp *disp) +nv50_disp_fini(struct nvkm_disp *disp, bool suspend) { struct nvkm_device *device = disp->engine.subdev.device; /* disable all interrupts */ @@ -1563,7 +1583,15 @@ nv50_disp_oneinit(struct nvkm_disp *disp) const struct nvkm_disp_func *func = disp->func; struct nvkm_subdev *subdev = &disp->engine.subdev; struct nvkm_device *device = subdev->device; + struct nvkm_bios *bios = device->bios; + struct nvkm_outp *outp, *outt, *pair; + struct nvkm_conn *conn; + struct nvkm_ior *ior; int ret, i; + u8 ver, hdr; + u32 data; + struct dcb_output dcbE; + struct nvbios_connE connE; if (func->wndw.cnt) { disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask); @@ -1610,8 +1638,130 @@ nv50_disp_oneinit(struct nvkm_disp *disp) if (ret) return ret; - return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size : - 0x1000, 0, disp->inst, &disp->ramht); + ret = nvkm_ramht_new(device, func->ramht_size ? func->ramht_size : 0x1000, 0, disp->inst, + &disp->ramht); + if (ret) + return ret; + + /* Create output path objects for each VBIOS display path. */ + i = -1; + while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { + if (WARN_ON((ver & 0xf0) != 0x40)) + return -EINVAL; + if (dcbE.type == DCB_OUTPUT_UNUSED) + continue; + if (dcbE.type == DCB_OUTPUT_EOL) + break; + outp = NULL; + + switch (dcbE.type) { + case DCB_OUTPUT_ANALOG: + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + ret = nvkm_outp_new(disp, i, &dcbE, &outp); + break; + case DCB_OUTPUT_DP: + ret = nvkm_dp_new(disp, i, &dcbE, &outp); + break; + case DCB_OUTPUT_TV: + case DCB_OUTPUT_WFD: + /* No support for WFD yet. 
*/ + ret = -ENODEV; + continue; + default: + nvkm_warn(subdev, "dcb %d type %d unknown\n", + i, dcbE.type); + continue; + } + + if (ret) { + if (outp) { + if (ret != -ENODEV) + OUTP_ERR(outp, "ctor failed: %d", ret); + else + OUTP_DBG(outp, "not supported"); + nvkm_outp_del(&outp); + continue; + } + nvkm_error(subdev, "failed to create outp %d\n", i); + continue; + } + + list_add_tail(&outp->head, &disp->outps); + } + + /* Create connector objects based on available output paths. */ + list_for_each_entry_safe(outp, outt, &disp->outps, head) { + /* VBIOS data *should* give us the most useful information. */ + data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, + &connE); + + /* No bios connector data... */ + if (!data) { + /* Heuristic: anything with the same ccb index is + * considered to be on the same connector, any + * output path without an associated ccb entry will + * be put on its own connector. + */ + int ccb_index = outp->info.i2c_index; + if (ccb_index != 0xf) { + list_for_each_entry(pair, &disp->outps, head) { + if (pair->info.i2c_index == ccb_index) { + outp->conn = pair->conn; + break; + } + } + } + + /* Connector shared with another output path. */ + if (outp->conn) + continue; + + memset(&connE, 0x00, sizeof(connE)); + connE.type = DCB_CONNECTOR_NONE; + i = -1; + } else { + i = outp->info.connector; + } + + /* Check that we haven't already created this connector. */ + list_for_each_entry(conn, &disp->conns, head) { + if (conn->index == outp->info.connector) { + outp->conn = conn; + break; + } + } + + if (outp->conn) + continue; + + /* Apparently we need to create a new one! */ + ret = nvkm_conn_new(disp, i, &connE, &outp->conn); + if (ret) { + nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret); + nvkm_conn_del(&outp->conn); + list_del(&outp->head); + nvkm_outp_del(&outp); + continue; + } + + list_add_tail(&outp->conn->head, &disp->conns); + } + + /* Enforce identity-mapped SOR assignment for panels, which have + * certain bits (ie. backlight controls) wired to a specific SOR. 
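+ * For example, outp->info.or == 0x4 names SOR2: ffs(0x4) - 1 == 2, so the
+ * nvkm_ior_find(disp, SOR, ...) lookup below lands on exactly that IOR
+ * before both it and the output path are flagged identity.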
+ */ + list_for_each_entry(outp, &disp->outps, head) { + if (outp->conn->info.type == DCB_CONNECTOR_LVDS || + outp->conn->info.type == DCB_CONNECTOR_eDP) { + ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1); + if (!WARN_ON(!ior)) + ior->identity = true; + outp->identity = true; + } + } + + return 0; } static const struct nvkm_disp_func diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 6094805fbd..28adc5a30f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -22,14 +22,16 @@ * Authors: Ben Skeggs */ #include "outp.h" +#include "conn.h" #include "dp.h" #include "ior.h" #include <subdev/bios.h> #include <subdev/bios/dcb.h> +#include <subdev/gpio.h> #include <subdev/i2c.h> -void +static void nvkm_outp_route(struct nvkm_disp *disp) { struct nvkm_outp *outp; @@ -46,8 +48,8 @@ nvkm_outp_route(struct nvkm_disp *disp) list_for_each_entry(ior, &disp->iors, head) { if ((outp = ior->asy.outp)) { - OUTP_DBG(outp, "acquire %s", ior->name); if (ior->asy.outp != ior->arm.outp) { + OUTP_DBG(outp, "acquire %s", ior->name); if (ior->func->route.set) ior->func->route.set(outp, ior); ior->arm.outp = ior->asy.outp; @@ -87,22 +89,20 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) } void -nvkm_outp_release(struct nvkm_outp *outp, u8 user) +nvkm_outp_release_or(struct nvkm_outp *outp, u8 user) { struct nvkm_ior *ior = outp->ior; OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior); if (ior) { outp->acquired &= ~user; if (!outp->acquired) { - if (outp->func->release && outp->ior) - outp->func->release(outp); outp->ior->asy.outp = NULL; outp->ior = NULL; } } } -static inline int +int nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior) { outp->ior = ior; @@ -140,7 +140,7 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type, } int -nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda) +nvkm_outp_acquire_or(struct nvkm_outp *outp, u8 user, bool hda) { struct nvkm_ior *ior = outp->ior; enum nvkm_ior_proto proto; @@ -207,39 +207,110 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda) return nvkm_outp_acquire_hda(outp, type, user, false); } +int +nvkm_outp_bl_set(struct nvkm_outp *outp, int level) +{ + int ret; + + ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false); + if (ret) + return ret; + + if (outp->ior->func->bl) + ret = outp->ior->func->bl->set(outp->ior, level); + else + ret = -EINVAL; + + nvkm_outp_release_or(outp, NVKM_OUTP_PRIV); + return ret; +} + +int +nvkm_outp_bl_get(struct nvkm_outp *outp) +{ + int ret; + + ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false); + if (ret) + return ret; + + if (outp->ior->func->bl) + ret = outp->ior->func->bl->get(outp->ior); + else + ret = -EINVAL; + + nvkm_outp_release_or(outp, NVKM_OUTP_PRIV); + return ret; +} + +int +nvkm_outp_detect(struct nvkm_outp *outp) +{ + struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio; + int ret = -EINVAL; + + if (outp->conn->info.hpd != DCB_GPIO_UNUSED) { + ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, outp->conn->info.hpd); + if (ret < 0) + return ret; + if (ret) + return 1; + + /*TODO: Look into returning NOT_PRESENT if !HPD on DVI/HDMI. + * + * It's uncertain whether this is accurate for all older chipsets, + * so we're returning UNKNOWN, and the DRM will probe DDC instead. 
+ */ + if (outp->info.type == DCB_OUTPUT_DP) + return 0; + } + + return ret; +} + void -nvkm_outp_fini(struct nvkm_outp *outp) +nvkm_outp_release(struct nvkm_outp *outp) { - if (outp->func->fini) - outp->func->fini(outp); + nvkm_outp_release_or(outp, NVKM_OUTP_USER); + nvkm_outp_route(outp->disp); } -static void -nvkm_outp_init_route(struct nvkm_outp *outp) +int +nvkm_outp_acquire(struct nvkm_outp *outp, bool hda) +{ + int ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_USER, hda); + + if (ret) + return ret; + + nvkm_outp_route(outp->disp); + return 0; +} + +struct nvkm_ior * +nvkm_outp_inherit(struct nvkm_outp *outp) { struct nvkm_disp *disp = outp->disp; + struct nvkm_ior *ior; enum nvkm_ior_proto proto; enum nvkm_ior_type type; - struct nvkm_ior *ior; int id, link; /* Find any OR from the class that is able to support this device. */ proto = nvkm_outp_xlat(outp, &type); if (proto == UNKNOWN) - return; + return NULL; ior = nvkm_ior_find(disp, type, -1); - if (!ior) { - WARN_ON(1); - return; - } + if (WARN_ON(!ior)) + return NULL; /* Determine the specific OR, if any, this device is attached to. */ if (ior->func->route.get) { id = ior->func->route.get(outp, &link); if (id < 0) { OUTP_DBG(outp, "no route"); - return; + return NULL; } } else { /* Prior to DCB 4.1, this is hardwired like so. */ @@ -248,10 +319,24 @@ nvkm_outp_init_route(struct nvkm_outp *outp) } ior = nvkm_ior_find(disp, type, id); - if (!ior) { - WARN_ON(1); + if (WARN_ON(!ior)) + return NULL; + + return ior; +} + +void +nvkm_outp_init(struct nvkm_outp *outp) +{ + enum nvkm_ior_proto proto; + enum nvkm_ior_type type; + struct nvkm_ior *ior; + + /* Find any OR from the class that is able to support this device. */ + proto = nvkm_outp_xlat(outp, &type); + ior = outp->func->inherit(outp); + if (!ior) return; - } /* Determine if the OR is already configured for this device. 
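 * (The ->state() readback below captures what the OR is currently driving, which
 * is what lets a display lit by the VBIOS or firmware be inherited at load time
 * rather than torn down.)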
*/ ior->func->state(ior, &ior->arm); @@ -274,14 +359,6 @@ nvkm_outp_init_route(struct nvkm_outp *outp) } void -nvkm_outp_init(struct nvkm_outp *outp) -{ - nvkm_outp_init_route(outp); - if (outp->func->init) - outp->func->init(outp); -} - -void nvkm_outp_del(struct nvkm_outp **poutp) { struct nvkm_outp *outp = *poutp; @@ -309,7 +386,8 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp, outp->disp = disp; outp->index = index; outp->info = *dcbE; - outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); + if (!disp->rm.client.gsp) + outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " "edid %x bus %d head %x", @@ -328,6 +406,13 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp, static const struct nvkm_outp_func nvkm_outp = { + .init = nvkm_outp_init, + .detect = nvkm_outp_detect, + .inherit = nvkm_outp_inherit, + .acquire = nvkm_outp_acquire, + .release = nvkm_outp_release, + .bl.get = nvkm_outp_bl_get, + .bl.set = nvkm_outp_bl_set, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index 4e7f873f66..ebd2f499b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h @@ -35,6 +35,8 @@ struct nvkm_outp { struct { struct nvbios_dpout info; u8 version; + bool mst; + bool increased_wm; struct nvkm_i2c_aux *aux; @@ -50,14 +52,13 @@ struct nvkm_outp { u32 rate; } rate[8]; int rates; - int links; struct mutex mutex; struct { - atomic_t done; u8 nr; u8 bw; bool mst; + bool post_adj; } lt; } dp; }; @@ -74,17 +75,45 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvk void nvkm_outp_del(struct nvkm_outp **); void nvkm_outp_init(struct nvkm_outp *); void nvkm_outp_fini(struct nvkm_outp *); -int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda); -void nvkm_outp_release(struct nvkm_outp *, u8 user); -void nvkm_outp_route(struct nvkm_disp *); + +int nvkm_outp_detect(struct nvkm_outp *); + +struct nvkm_ior *nvkm_outp_inherit(struct nvkm_outp *); +int nvkm_outp_acquire(struct nvkm_outp *, bool hda); +int nvkm_outp_acquire_or(struct nvkm_outp *, u8 user, bool hda); +int nvkm_outp_acquire_ior(struct nvkm_outp *, u8 user, struct nvkm_ior *); +void nvkm_outp_release(struct nvkm_outp *); +void nvkm_outp_release_or(struct nvkm_outp *, u8 user); + +int nvkm_outp_bl_get(struct nvkm_outp *); +int nvkm_outp_bl_set(struct nvkm_outp *, int level); struct nvkm_outp_func { void *(*dtor)(struct nvkm_outp *); void (*init)(struct nvkm_outp *); void (*fini)(struct nvkm_outp *); - int (*acquire)(struct nvkm_outp *); + + int (*detect)(struct nvkm_outp *); + int (*edid_get)(struct nvkm_outp *, u8 *data, u16 *size); + + struct nvkm_ior *(*inherit)(struct nvkm_outp *); + int (*acquire)(struct nvkm_outp *, bool hda); void (*release)(struct nvkm_outp *); - void (*disable)(struct nvkm_outp *, struct nvkm_ior *); + + struct { + int (*get)(struct nvkm_outp *); + int (*set)(struct nvkm_outp *, int level); + } bl; + + struct { + int (*aux_pwr)(struct nvkm_outp *, bool pu); + int (*aux_xfer)(struct nvkm_outp *, u8 type, u32 addr, u8 *data, u8 *size); + int (*rates)(struct nvkm_outp *); + int (*train)(struct nvkm_outp *, bool retrain); + int (*drive)(struct nvkm_outp *, u8 lanes, u8 pe[4], u8 vs[4]); + int (*mst_id_get)(struct nvkm_outp *, u32 *id); + int (*mst_id_put)(struct nvkm_outp *, u32 id); + } dp; }; #define OUTP_MSG(o,l,f,a...) 
do { \ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h index ec5292a8f3..a3fd7cb7c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h @@ -8,6 +8,9 @@ struct nvkm_head; struct nvkm_outp; struct dcb_output; +int r535_disp_new(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_disp **); + int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_disp *); int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, @@ -15,9 +18,10 @@ int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvk void nvkm_disp_vblank(struct nvkm_disp *, int head); struct nvkm_disp_func { + void (*dtor)(struct nvkm_disp *); int (*oneinit)(struct nvkm_disp *); int (*init)(struct nvkm_disp *); - void (*fini)(struct nvkm_disp *); + void (*fini)(struct nvkm_disp *, bool suspend); void (*intr)(struct nvkm_disp *); void (*intr_error)(struct nvkm_disp *, int chid); @@ -32,7 +36,7 @@ struct nvkm_disp_func { u16 ramht_size; - const struct nvkm_sclass root; + struct nvkm_sclass root; struct nvkm_disp_user { struct nvkm_sclass base; @@ -44,7 +48,7 @@ struct nvkm_disp_func { int nv50_disp_oneinit(struct nvkm_disp *); int nv50_disp_init(struct nvkm_disp *); -void nv50_disp_fini(struct nvkm_disp *); +void nv50_disp_fini(struct nvkm_disp *, bool suspend); void nv50_disp_intr(struct nvkm_disp *); extern const struct nvkm_enum nv50_disp_intr_error_type[]; void nv50_disp_super(struct work_struct *); @@ -56,12 +60,12 @@ void nv50_disp_super_2_2(struct nvkm_disp *, struct nvkm_head *); void nv50_disp_super_3_0(struct nvkm_disp *, struct nvkm_head *); int gf119_disp_init(struct nvkm_disp *); -void gf119_disp_fini(struct nvkm_disp *); +void gf119_disp_fini(struct nvkm_disp *, bool suspend); void gf119_disp_intr(struct nvkm_disp *); void gf119_disp_super(struct work_struct *); void gf119_disp_intr_error(struct nvkm_disp *, int); -void gv100_disp_fini(struct nvkm_disp *); +void gv100_disp_fini(struct nvkm_disp *, bool suspend); void gv100_disp_intr(struct nvkm_disp *); void gv100_disp_super(struct work_struct *); int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c new file mode 100644 index 0000000000..6a0a4d3b89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c @@ -0,0 +1,1714 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include "chan.h" +#include "conn.h" +#include "dp.h" +#include "head.h" +#include "ior.h" +#include "outp.h" + +#include <core/ramht.h> +#include <subdev/bios.h> +#include <subdev/bios/conn.h> +#include <subdev/gsp.h> +#include <subdev/mmu.h> +#include <subdev/vfn.h> + +#include <nvhw/drf.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> +#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h> +#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h> + +#include <linux/acpi.h> + +static u64 +r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize) +{ + switch (chan->object.oclass & 0xff) { + case 0x7d: *psize = 0x10000; return 0x680000; + case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize); + case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize); + case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize); + default: + BUG_ON(1); + break; + } + + return 0ULL; +} + +static void +r535_chan_intr(struct nvkm_disp_chan *chan, bool en) +{ +} + +static void +r535_chan_fini(struct nvkm_disp_chan *chan) +{ + nvkm_gsp_rm_free(&chan->rm.object); +} + +static int +r535_chan_push(struct nvkm_disp_chan *chan) +{ + struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + if (chan->memory) { + switch (nvkm_memory_target(chan->memory)) { + case NVKM_MEM_TARGET_NCOH: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 0; + break; + case NVKM_MEM_TARGET_HOST: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 1; + break; + case NVKM_MEM_TARGET_VRAM: + ctrl->addressSpace = ADDR_FBMEM; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + ctrl->physicalAddr = nvkm_memory_addr(chan->memory); + ctrl->limit = nvkm_memory_size(chan->memory) - 1; + } + + ctrl->hclass = chan->object.oclass; + ctrl->channelInstance = chan->head; + ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 
1 : 0; + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static int +r535_curs_init(struct nvkm_disp_chan *chan) +{ + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args; + int ret; + + ret = r535_chan_push(chan); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, + (chan->object.oclass << 16) | chan->head, + chan->object.oclass, sizeof(*args), &chan->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = chan->head; + + return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); +} + +static const struct nvkm_disp_chan_func +r535_curs_func = { + .init = r535_curs_init, + .fini = r535_chan_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, +}; + +static const struct nvkm_disp_chan_user +r535_curs = { + .func = &r535_curs_func, + .user = 73, +}; + +static int +r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle) +{ + return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle, + chan->chid.user << 25 | + (chan->disp->rm.client.object.handle & 0x3fff)); +} + +static void +r535_dmac_fini(struct nvkm_disp_chan *chan) +{ + struct nvkm_device *device = chan->disp->engine.subdev.device; + const u32 uoff = (chan->chid.user - 1) * 0x1000; + + chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff); + r535_chan_fini(chan); +} + +static int +r535_dmac_init(struct nvkm_disp_chan *chan) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; + int ret; + + ret = r535_chan_push(chan); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, + (chan->object.oclass << 16) | chan->head, + chan->object.oclass, sizeof(*args), &chan->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = chan->head; + args->offset = chan->suspend_put; + + return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); +} + +static int +r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory) +{ + chan->memory = nvkm_umem_search(chan->object.client, memory); + if (IS_ERR(chan->memory)) + return PTR_ERR(chan->memory); + + return 0; +} + +static const struct nvkm_disp_chan_func +r535_dmac_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_dmac_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, + .bind = r535_dmac_bind, +}; + +static const struct nvkm_disp_chan_func +r535_wimm_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_dmac_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, +}; + +static const struct nvkm_disp_chan_user +r535_wimm = { + .func = &r535_wimm_func, + .user = 33, +}; + +static const struct nvkm_disp_chan_user +r535_wndw = { + .func = &r535_dmac_func, + .user = 1, +}; + +static void +r535_core_fini(struct nvkm_disp_chan *chan) +{ + struct nvkm_device *device = chan->disp->engine.subdev.device; + + chan->suspend_put = nvkm_rd32(device, 0x680000); + r535_chan_fini(chan); +} + +static const struct nvkm_disp_chan_func +r535_core_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_core_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, + .bind = r535_dmac_bind, +}; + +static const struct nvkm_disp_chan_user +r535_core = { + .func = &r535_core_func, + .user = 0, +}; + +static int +r535_sor_bl_set(struct nvkm_ior *sor, int lvl) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + sizeof(*ctrl)); + if 
(IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->brightness = lvl; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_sor_bl_get(struct nvkm_ior *sor) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret, lvl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(sor->asy.outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + lvl = ctrl->brightness; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return lvl; +} + +static const struct nvkm_ior_func_bl +r535_sor_bl = { + .get = r535_sor_bl_get, + .set = r535_sor_bl_set, +}; + +static void +r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; + + if (WARN_ON(size > sizeof(ctrl->bufferELD))) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->numELDSize = size; + memcpy(ctrl->bufferELD, data, size); + ctrl->maxFreqSupported = 0; //XXX + ctrl->ctrl = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE); + ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE); + ctrl->deviceEntry = head; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; + + if (present) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->deviceEntry = head; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static const struct nvkm_ior_func_hda +r535_sor_hda = { + .hpd = r535_sor_hda_hpd, + .eld = r535_sor_hda_eld, +}; + +static void +r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->mute = mute; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl; + + if (!enable) + r535_sor_dp_audio_mute(sor, true); + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->enable = enable; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); + + if (enable) + r535_sor_dp_audio_mute(sor, false); +} + +static void +r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn) +{ + struct nvkm_disp *disp = sor->disp; + struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + 
NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + ctrl->sorIndex = sor->id; + ctrl->dpLink = sor->asy.link == 2; + ctrl->bEnableOverride = 1; + ctrl->bMST = 1; + ctrl->hBlankSym = 0; + ctrl->vBlankSym = 0; + ctrl->colorFormat = 0; + ctrl->bEnableTwoHeadOneOr = 0; + ctrl->singleHeadMultistreamMode = 0; + ctrl->MST.slotStart = slot; + ctrl->MST.slotEnd = slot + slot_nr - 1; + ctrl->MST.PBN = pbn; + ctrl->MST.Timeslice = aligned_pbn; + ctrl->MST.sendACT = 0; + ctrl->MST.singleHeadMSTPipeline = 0; + ctrl->MST.bEnableAudioOverRightPanel = 0; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static int +r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef, + u32 watermark, u32 hblanksym, u32 vblanksym) +{ + struct nvkm_disp *disp = sor->disp; + struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + ctrl->sorIndex = sor->id; + ctrl->dpLink = sor->asy.link == 2; + ctrl->bEnableOverride = 1; + ctrl->bMST = 0; + ctrl->hBlankSym = hblanksym; + ctrl->vBlankSym = vblanksym; + ctrl->colorFormat = 0; + ctrl->bEnableTwoHeadOneOr = 0; + ctrl->SST.bEnhancedFraming = ef; + ctrl->SST.tuSize = 64; + ctrl->SST.waterMark = watermark; + ctrl->SST.bEnableAudioOverRightPanel = 0; + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static const struct nvkm_ior_func_dp +r535_sor_dp = { + .sst = r535_sor_dp_sst, + .vcpi = r535_sor_dp_vcpi, + .audio = r535_sor_dp_audio, +}; + +static void +r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling, + bool scrambling_low_rates) +{ + struct nvkm_outp *outp = sor->asy.outp; + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->caps = 0; + if (support) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE); + if (scrambling) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE); + if (scrambling_low_rates) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE); + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->mute = mute; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->transmitControl = + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) | + NVDEF(NV0073_CTRL_SPECIFIC, 
SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO); + ctrl->packetSize = 10; + ctrl->aPacket[0] = 0x03; + ctrl->aPacket[1] = 0x00; + ctrl->aPacket[2] = 0x00; + ctrl->aPacket[3] = enable ? 0x10 : 0x01; + ctrl->aPacket[4] = 0x00; + ctrl->aPacket[5] = 0x00; + ctrl->aPacket[6] = 0x00; + ctrl->aPacket[7] = 0x00; + ctrl->aPacket[8] = 0x00; + ctrl->aPacket[9] = 0x00; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable) +{ + struct nvkm_device *device = sor->disp->engine.subdev.device; + const u32 hdmi = head * 0x400; + + r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable); + r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable); + + /* General Control (GCP). */ + nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000); + nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010); + nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001); +} + +static void +r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl; + + if (!enable) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->enable = enable; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static const struct nvkm_ior_func_hdmi +r535_sor_hdmi = { + .ctrl = r535_sor_hdmi_ctrl, + .scdc = r535_sor_hdmi_scdc, + /*TODO: SF_USER -> KMS. 
*/ + .infoframe_avi = gv100_sor_hdmi_infoframe_avi, + .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi, + .audio = r535_sor_hdmi_audio, +}; + +static const struct nvkm_ior_func +r535_sor = { + .hdmi = &r535_sor_hdmi, + .dp = &r535_sor_dp, + .hda = &r535_sor_hda, + .bl = &r535_sor_bl, +}; + +static int +r535_sor_new(struct nvkm_disp *disp, int id) +{ + return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/); +} + +static int +r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask) +{ + *pmask = 0xf; + return 4; +} + +static void +r535_head_vblank_put(struct nvkm_head *head) +{ + struct nvkm_device *device = head->disp->engine.subdev.device; + + nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000); +} + +static void +r535_head_vblank_get(struct nvkm_head *head) +{ + struct nvkm_device *device = head->disp->engine.subdev.device; + + nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002); + nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002); +} + +static void +r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state) +{ +} + +static const struct nvkm_head_func +r535_head = { + .state = r535_head_state, + .vblank_get = r535_head_vblank_get, + .vblank_put = r535_head_vblank_put, +}; + +static struct nvkm_conn * +r535_conn_new(struct nvkm_disp *disp, u32 id) +{ + NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl; + struct nvbios_connE dcbE = {}; + struct nvkm_conn *conn; + int ret, index; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return (void *)ctrl; + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ERR_PTR(ret); + } + + list_for_each_entry(conn, &disp->conns, head) { + if (conn->index == ctrl->data[0].index) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return conn; + } + } + + dcbE.type = ctrl->data[0].type; + index = ctrl->data[0].index; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + ret = nvkm_conn_new(disp, index, &dcbE, &conn); + if (ret) + return ERR_PTR(ret); + + list_add_tail(&conn->head, &disp->conns); + return conn; +} + +static void +r535_outp_release(struct nvkm_outp *outp) +{ + outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id); + outp->ior->asy.outp = NULL; + outp->ior = NULL; +} + +static int +r535_outp_acquire(struct nvkm_outp *outp, bool hda) +{ + struct nvkm_disp *disp = outp->disp; + struct nvkm_ior *ior; + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl; + int ret, or; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->sorExcludeMask = disp->rm.assigned_sors; + if (hda) + ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) { + if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) { + disp->rm.assigned_sors |= BIT(or); + break; + } + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag))) + return -EINVAL; + + ior = nvkm_ior_find(disp, SOR, or); + if (WARN_ON(!ior)) + return -EINVAL; + 
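+	/* The sorAssignListWithTag[] scan above follows the reply layout of
+	 * NV0073_CTRL_CMD_DFP_ASSIGN_SOR: one entry per SOR, whose displayMask
+	 * holds the displays that SOR was granted.  A minimal self-contained
+	 * sketch of the same lookup -- helper and parameter names here are
+	 * hypothetical, only the scan logic mirrors the code above:
+	 *
+	 *	static int find_assigned_sor(const u32 *display_masks,
+	 *				     int nr_sors, int outp_index)
+	 *	{
+	 *		for (int or = 0; or < nr_sors; or++) {
+	 *			if (display_masks[or] & BIT(outp_index))
+	 *				return or;
+	 *		}
+	 *
+	 *		return -1;
+	 *	}
+	 */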
+ nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior); + return 0; +} + +static int +r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) +{ + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *displayid = ctrl->displayId; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static struct nvkm_ior * +r535_outp_inherit(struct nvkm_outp *outp) +{ + struct nvkm_disp *disp = outp->disp; + struct nvkm_head *head; + u32 displayid; + int ret; + + list_for_each_entry(head, &disp->heads, head) { + ret = r535_disp_head_displayid(disp, head->id, &displayid); + if (WARN_ON(ret)) + return NULL; + + if (displayid == BIT(outp->index)) { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; + u32 id, proto; + struct nvkm_ior *ior; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return NULL; + + ctrl->subDeviceInstance = 0; + ctrl->displayId = displayid; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return NULL; + } + + id = ctrl->index; + proto = ctrl->protocol; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + ior = nvkm_ior_find(disp, SOR, id); + if (WARN_ON(!ior)) + return NULL; + + switch (proto) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + ior->arm.proto = TMDS; + ior->arm.link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + ior->arm.proto = TMDS; + ior->arm.link = 2; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + ior->arm.proto = TMDS; + ior->arm.link = 3; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + ior->arm.proto = DP; + ior->arm.link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + ior->arm.proto = DP; + ior->arm.link = 2; + break; + default: + WARN_ON(1); + return NULL; + } + + ior->arm.proto_evo = proto; + ior->arm.head = BIT(head->id); + disp->rm.assigned_sors |= BIT(ior->id); + return ior; + } + } + + return NULL; +} + +static int +r535_outp_dfp_get_info(struct nvkm_outp *outp) +{ + NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n", + ctrl->displayId, ctrl->flags, ctrl->flags2); + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r535_outp_detect(struct nvkm_outp *outp) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayMask = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if 
(ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + if (ctrl->displayMask & BIT(outp->index)) { + ret = r535_outp_dfp_get_info(outp); + if (ret == 0) + ret = 1; + } else { + ret = 0; + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static int +r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id) +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = id; + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid) +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *pid = ctrl->displayIdAssigned; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4]) +{ + NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + ctrl->numLanes = lanes; + for (int i = 0; i < lanes; i++) + ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA, PREEMPHASIS, pe[i]) | + NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]); + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_DP_CTRL_PARAMS *ctrl; + int ret, retries; + u32 cmd, data; + + cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES); + data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) | + NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) | + NVVAL(NV0073_CTRL, DP_DATA, TARGET, target); + + if (mst) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM); + + if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE); + + if (target == 0 && + (outp->dp.dpcd[DPCD_RC02] & 0x20) && + !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES); + + /* We should retry up to 3 times, but only if GSP asks politely */ + for (retries = 0; retries < 3; ++retries) { + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->retryTimeMs = 0; + ctrl->cmd = cmd; + ctrl->data = data; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret == -EAGAIN && ctrl->retryTimeMs) { + /* + * Device (likely an eDP panel) isn't ready yet, wait for the time specified + * by GSP before retrying 
again + */ + nvkm_debug(&disp->engine.subdev, + "Waiting %dms for GSP LT panel delay before retrying\n", + ctrl->retryTimeMs); + msleep(ctrl->retryTimeMs); + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } else { + /* GSP didn't say to retry, or we were successful */ + if (ctrl->err) + ret = -EIO; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + break; + } + } + + return ret; +} + +static int +r535_dp_train(struct nvkm_outp *outp, bool retrain) +{ + for (int target = outp->dp.lttprs; target >= 0; target--) { + int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst, + outp->dp.lt.nr, + outp->dp.lt.bw); + if (ret) + return ret; + } + + return 0; +} + +static int +r535_dp_rates(struct nvkm_outp *outp) +{ + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + if (outp->conn->info.type != DCB_CONNECTOR_eDP || + !outp->dp.rates || outp->dp.rate[0].dpcd < 0) + return 0; + + if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + for (int i = 0; i < outp->dp.rates; i++) + ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl; + u8 size = *psize; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->bAddrOnly = !size; + ctrl->cmd = type; + if (ctrl->bAddrOnly) { + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE); + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE); + } + ctrl->addr = addr; + ctrl->size = !ctrl->bAddrOnly ? 
(size - 1) : 0; + memcpy(ctrl->data, data, size); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + memcpy(data, ctrl->data, size); + *psize = ctrl->size; + ret = ctrl->replyType; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static int +r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu) +{ + return 0; +} + +static void +r535_dp_release(struct nvkm_outp *outp) +{ + if (!outp->dp.lt.bw) { + if (!WARN_ON(!outp->dp.rates)) + outp->dp.lt.bw = outp->dp.rate[0].rate / 27000; + else + outp->dp.lt.bw = 0x06; + } + + outp->dp.lt.nr = 0; + + r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw); + r535_outp_release(outp); +} + +static int +r535_dp_acquire(struct nvkm_outp *outp, bool hda) +{ + int ret; + + ret = r535_outp_acquire(outp, hda); + if (ret) + return ret; + + return 0; +} + +static const struct nvkm_outp_func +r535_dp = { + .detect = r535_outp_detect, + .inherit = r535_outp_inherit, + .acquire = r535_dp_acquire, + .release = r535_dp_release, + .dp.aux_pwr = r535_dp_aux_pwr, + .dp.aux_xfer = r535_dp_aux_xfer, + .dp.mst_id_get = r535_dp_mst_id_get, + .dp.mst_id_put = r535_dp_mst_id_put, + .dp.rates = r535_dp_rates, + .dp.train = r535_dp_train, + .dp.drive = r535_dp_drive, +}; + +static int +r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) +{ + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret = -E2BIG; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + ret = -E2BIG; + if (ctrl->bufferSize <= *psize) { + memcpy(data, ctrl->edidBuffer, ctrl->bufferSize); + *psize = ctrl->bufferSize; + ret = 0; + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static const struct nvkm_outp_func +r535_tmds = { + .detect = r535_outp_detect, + .inherit = r535_outp_inherit, + .acquire = r535_outp_acquire, + .release = r535_outp_release, + .edid_get = r535_tmds_edid_get, +}; + +static int +r535_outp_new(struct nvkm_disp *disp, u32 id) +{ + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; + enum nvkm_ior_proto proto; + struct dcb_output dcbE = {}; + struct nvkm_conn *conn; + struct nvkm_outp *outp; + u8 locn, link = 0; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (ctrl->type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE: + return 0; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + switch (ctrl->protocol) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + proto = TMDS; + link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + proto = TMDS; + link = 2; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + proto = TMDS; + link = 3; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + proto = DP; + link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + proto 
= DP; + link = 2; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + break; + default: + WARN_ON(1); + return -EINVAL; + } + + locn = ctrl->location; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + conn = r535_conn_new(disp, id); + if (IS_ERR(conn)) + return PTR_ERR(conn); + + switch (proto) { + case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break; + case DP: dcbE.type = DCB_OUTPUT_DP; break; + default: + WARN_ON(1); + return -EINVAL; + } + + dcbE.location = locn; + dcbE.connector = conn->index; + dcbE.heads = disp->head.mask; + dcbE.i2c_index = 0xff; + dcbE.link = dcbE.sorconf.link = link; + + if (proto == TMDS) { + ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp); + if (ret) + return ret; + } else { + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + bool mst, wm; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + dcbE.dpconf.link_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + dcbE.dpconf.link_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + dcbE.dpconf.link_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + dcbE.dpconf.link_bw = 0x1e; + break; + default: + dcbE.dpconf.link_bw = 0x00; + break; + } + + mst = ctrl->bIsMultistreamSupported; + wm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + if (WARN_ON(!dcbE.dpconf.link_bw)) + return -EINVAL; + + dcbE.dpconf.link_nr = 4; + + ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp); + if (ret) + return ret; + + outp->dp.mst = mst; + outp->dp.increased_wm = wm; + } + + + outp->conn = conn; + list_add_tail(&outp->head, &disp->outps); + return 0; +} + +static void +r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc) +{ + struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq); + Nv2080DpIrqNotification *irq = repv; + + if (WARN_ON(repc < sizeof(*irq))) + return; + + nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId); + + if (irq->displayId) + nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ); +} + +static void +r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc) +{ + struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd); + Nv2080HotplugNotification *hpd = repv; + + if (WARN_ON(repc < sizeof(*hpd))) + return; + + nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n", + hpd->plugDisplayMask, hpd->unplugDisplayMask); + + for (int i = 0; i < 31; i++) { + u32 mask = 0; + + if (hpd->plugDisplayMask & BIT(i)) + mask |= NVKM_DPYID_PLUG; + if (hpd->unplugDisplayMask & BIT(i)) + mask |= NVKM_DPYID_UNPLUG; + + if (mask) + nvkm_event_ntfy(&disp->rm.event, i, mask); + } +} + +static const struct nvkm_event_func +r535_disp_event = { +}; + +static void +r535_disp_intr_head_timing(struct nvkm_disp *disp, int head) +{ + struct nvkm_subdev *subdev = &disp->engine.subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04)); + + if (stat & 0x00000002) { + nvkm_disp_vblank(disp, head); + + nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002); + } 
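+	/* Bit 1 of 0x611c00 + (head * 4) is the head's vblank status.  The
+	 * write of the same bit to 0x611800 + (head * 4) above looks to be the
+	 * write-one-to-clear ack (r535_head_vblank_get() issues it as well
+	 * before unmasking), while 0x611d80, toggled by vblank_get()/put(),
+	 * gates whether the interrupt is delivered at all.
+	 */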
+} + +static irqreturn_t +r535_disp_intr(struct nvkm_inth *inth) +{ + struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth); + struct nvkm_subdev *subdev = &disp->engine.subdev; + struct nvkm_device *device = subdev->device; + unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff; + int head; + + for_each_set_bit(head, &mask, 8) + r535_disp_intr_head_timing(disp, head); + + return IRQ_HANDLED; +} + +static void +r535_disp_fini(struct nvkm_disp *disp, bool suspend) +{ + if (!disp->engine.subdev.use.enabled) + return; + + nvkm_gsp_rm_free(&disp->rm.object); + + if (!suspend) { + nvkm_gsp_event_dtor(&disp->rm.irq); + nvkm_gsp_event_dtor(&disp->rm.hpd); + nvkm_event_fini(&disp->rm.event); + + nvkm_gsp_rm_free(&disp->rm.objcom); + nvkm_gsp_device_dtor(&disp->rm.device); + nvkm_gsp_client_dtor(&disp->rm.client); + } +} + +static int +r535_disp_init(struct nvkm_disp *disp) +{ + int ret; + + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16, + disp->func->root.oclass, 0, &disp->rm.object); + if (ret) + return ret; + + return 0; +} + +static int +r535_disp_oneinit(struct nvkm_disp *disp) +{ + struct nvkm_device *device = disp->engine.subdev.device; + struct nvkm_gsp *gsp = device->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; + int ret, i; + + /* RAMIN. */ + ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst); + if (ret) + return ret; + + if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM)) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory); + ctrl->instMemSize = nvkm_memory_size(disp->inst->memory); + ctrl->instMemAddrSpace = ADDR_FBMEM; + ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + /* OBJs. 
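Bring up the RM object tree that display control runs through: a GSP client/device pair, then the NV04_DISPLAY_COMMON object (allocated with handle 0x00730000) kept in disp->rm.objcom, against which the NV0073_CTRL_* calls below are issued. 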
*/ + ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device); + if (ret) + return ret; + + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0, + &disp->rm.objcom); + if (ret) + return ret; + + { + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + } + + /* */ + { +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl; + struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice; + + ctrl = nvkm_gsp_rm_ctrl_get(subdevice, + NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */ + + { + const guid_t NBCI_DSM_GUID = + GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7, + 0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44); + u64 NBCI_DSM_REV = 0x00000102; + const guid_t NVHG_DSM_GUID = + GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4); + u64 NVHG_DSM_REV = 0x00000102; + acpi_handle handle = ACPI_HANDLE(device->dev); + + if (handle && acpi_has_method(handle, "_DSM")) { + bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV, + 1ULL << 0x00000014); + bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV, + 1ULL << 0x00000014); + + if (nbci || nvhg) { + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(ctrl->backLightData), + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + obj = acpi_evaluate_dsm(handle, nbci ? 
&NBCI_DSM_GUID : &NVHG_DSM_GUID, + 0x00000102, 0x14, &argv4); + if (!obj) { + acpi_handle_info(handle, "failed to evaluate _DSM\n"); + } else { + for (int i = 0; i < obj->package.count; i++) { + union acpi_object *elt = &obj->package.elements[i]; + u32 size; + + if (elt->integer.value & ~0xffffffffULL) + size = 8; + else + size = 4; + + memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size); + ctrl->backLightDataSize += size; + } + + ctrl->status = 0; + ACPI_FREE(obj); + } + + kfree(argv4.buffer.pointer); + } + } + } + + ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl); + if (ret) + return ret; +#endif + } + + /* */ + { + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); + if (ret) + return ret; + } + + /* */ + { + NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->head.nr = ctrl->numHeads; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } + + /* */ + { + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->head.mask = ctrl->headMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + for_each_set_bit(i, &disp->head.mask, disp->head.nr) { + ret = nvkm_head_new_(&r535_head, disp, i); + if (ret) + return ret; + } + } + + disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask); + nvkm_debug(&disp->engine.subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask); + for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) { + ret = disp->func->sor.new(disp, i); + if (ret) + return ret; + } + + /* */ + { + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + unsigned long mask; + int i; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + mask = ctrl->displayMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + for_each_set_bit(i, &mask, 32) { + ret = r535_outp_new(disp, i); + if (ret) + return ret; + } + } + + ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event); + if (WARN_ON(ret)) + return ret; + + ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG, + r535_disp_hpd, &disp->rm.hpd); + if (ret) + return ret; + + ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ, + r535_disp_irq, &disp->rm.irq); + if (ret) + return ret; + + /* RAMHT. */ + ret = nvkm_ramht_new(device, disp->func->ramht_size ? 
disp->func->ramht_size : + 0x1000, 0, disp->inst, &disp->ramht); + if (ret) + return ret; + + ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst); + if (ret < 0) + return ret; + + ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev, + r535_disp_intr, &disp->engine.subdev.inth); + if (ret) + return ret; + + nvkm_inth_allow(&disp->engine.subdev.inth); + return 0; +} + +static void +r535_disp_dtor(struct nvkm_disp *disp) +{ + kfree(disp->func); +} + +int +r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) +{ + struct nvkm_disp_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_disp_dtor; + rm->oneinit = r535_disp_oneinit; + rm->init = r535_disp_init; + rm->fini = r535_disp_fini; + rm->uevent = hw->uevent; + rm->sor.cnt = r535_sor_cnt; + rm->sor.new = r535_sor_new; + rm->ramht_size = hw->ramht_size; + + rm->root = hw->root; + + for (int i = 0; hw->user[i].ctor; i++) { + switch (hw->user[i].base.oclass & 0xff) { + case 0x73: rm->user[i] = hw->user[i]; break; + case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break; + case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break; + case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break; + case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break; + default: + WARN_ON(1); + continue; + } + } + + ret = nvkm_disp_new_(rm, device, type, inst, pdisp); + if (ret) + kfree(rm); + + mutex_init(&(*pdisp)->super.mutex); //XXX + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c index f5242a6722..dcb9f8ba37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c @@ -25,6 +25,7 @@ #include "ior.h" #include <core/gpuobj.h> +#include <subdev/gsp.h> #include <subdev/timer.h> #include <nvif/class.h> @@ -88,6 +89,7 @@ tu102_sor = { .state = gv100_sor_state, .power = nv50_sor_power, .clock = gf119_sor_clock, + .bl = &gt215_sor_bl, .hdmi = &gv100_sor_hdmi, .dp = &tu102_sor_dp, .hda = &gv100_sor_hda, @@ -232,5 +234,8 @@ int tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) { + if (nvkm_gsp_rm(device->gsp)) + return r535_disp_new(&tu102_disp, device, type, inst, pdisp); + return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c index 3249e5c1c8..2dab6612c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c @@ -31,6 +31,23 @@ #include <nvif/if0011.h> static int +nvkm_uconn_uevent_gsp(struct nvkm_object *object, u64 token, u32 bits) +{ + union nvif_conn_event_args args; + + args.v0.version = 0; + args.v0.types = 0; + if (bits & NVKM_DPYID_PLUG) + args.v0.types |= NVIF_CONN_EVENT_V0_PLUG; + if (bits & NVKM_DPYID_UNPLUG) + args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG; + if (bits & NVKM_DPYID_IRQ) + args.v0.types |= NVIF_CONN_EVENT_V0_IRQ; + + return object->client->event(token, &args, sizeof(args.v0)); +} + +static int nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits) { union nvif_conn_event_args args; @@ -78,13 +95,14 @@ static int nvkm_uconn_uevent(struct nvkm_object 
*object, void *argv, u32 argc, struct nvkm_uevent *uevent) { struct nvkm_conn *conn = nvkm_uconn(object); - struct nvkm_device *device = conn->disp->engine.subdev.device; + struct nvkm_disp *disp = conn->disp; + struct nvkm_device *device = disp->engine.subdev.device; struct nvkm_outp *outp; union nvif_conn_event_args *args = argv; u64 bits = 0; if (!uevent) { - if (conn->info.hpd == DCB_GPIO_UNUSED) + if (!disp->rm.client.gsp && conn->info.hpd == DCB_GPIO_UNUSED) return -ENOSYS; return 0; } @@ -100,6 +118,15 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_ if (&outp->head == &conn->disp->outps) return -EINVAL; + if (disp->rm.client.gsp) { + if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_DPYID_PLUG; + if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_DPYID_UNPLUG; + if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_DPYID_IRQ; + + return nvkm_uevent_add(uevent, &disp->rm.event, outp->index, bits, + nvkm_uconn_uevent_gsp); + } + if (outp->dp.aux && !outp->info.location) { if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG; if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG; @@ -121,46 +148,6 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_ nvkm_uconn_uevent_gpio); } -static int -nvkm_uconn_mthd_hpd_status(struct nvkm_conn *conn, void *argv, u32 argc) -{ - struct nvkm_gpio *gpio = conn->disp->engine.subdev.device->gpio; - union nvif_conn_hpd_status_args *args = argv; - - if (argc != sizeof(args->v0) || args->v0.version != 0) - return -ENOSYS; - - args->v0.support = gpio && conn->info.hpd != DCB_GPIO_UNUSED; - args->v0.present = 0; - - if (args->v0.support) { - int ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->info.hpd); - - if (WARN_ON(ret < 0)) { - args->v0.support = false; - return 0; - } - - args->v0.present = ret; - } - - return 0; -} - -static int -nvkm_uconn_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) -{ - struct nvkm_conn *conn = nvkm_uconn(object); - - switch (mthd) { - case NVIF_CONN_V0_HPD_STATUS: return nvkm_uconn_mthd_hpd_status(conn, argv, argc); - default: - break; - } - - return -EINVAL; -} - static void * nvkm_uconn_dtor(struct nvkm_object *object) { @@ -176,7 +163,6 @@ nvkm_uconn_dtor(struct nvkm_object *object) static const struct nvkm_object_func nvkm_uconn = { .dtor = nvkm_uconn_dtor, - .mthd = nvkm_uconn_mthd, .uevent = nvkm_uconn_uevent, }; @@ -204,6 +190,32 @@ nvkm_uconn_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nv ret = -EBUSY; spin_lock(&disp->client.lock); if (!conn->object.func) { + switch (conn->info.type) { + case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break; + case DCB_CONNECTOR_TV_0 : + case DCB_CONNECTOR_TV_1 : + case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break; + case DCB_CONNECTOR_DMS59_0 : + case DCB_CONNECTOR_DMS59_1 : + case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break; + case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break; + case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break; + case DCB_CONNECTOR_LVDS_SPWG: args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break; + case DCB_CONNECTOR_DMS59_DP0: + case DCB_CONNECTOR_DMS59_DP1: + case DCB_CONNECTOR_DP : + case DCB_CONNECTOR_mDP : + case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break; + case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break; + case DCB_CONNECTOR_HDMI_0 : + case DCB_CONNECTOR_HDMI_1 : + case 
DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break; + default: + WARN_ON(1); + ret = -EINVAL; + break; + } + nvkm_object_ctor(&nvkm_uconn, oclass, &conn->object); *pobject = &conn->object; ret = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c index fc283a4a15..377d0e0cef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c @@ -25,6 +25,8 @@ #include "head.h" #include "ior.h" +#include <subdev/i2c.h> + #include <nvif/if0012.h> static int @@ -44,17 +46,121 @@ nvkm_uoutp_mthd_dp_mst_vcpi(struct nvkm_outp *outp, void *argv, u32 argc) } static int -nvkm_uoutp_mthd_dp_retrain(struct nvkm_outp *outp, void *argv, u32 argc) +nvkm_uoutp_mthd_dp_mst_id_put(struct nvkm_outp *outp, void *argv, u32 argc) { - union nvif_outp_dp_retrain_args *args = argv; + union nvif_outp_dp_mst_id_put_args *args = argv; - if (argc != sizeof(args->vn)) + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->dp.mst_id_put) + return -EINVAL; + + return outp->func->dp.mst_id_put(outp, args->v0.id); +} + +static int +nvkm_uoutp_mthd_dp_mst_id_get(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_mst_id_get_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->dp.mst_id_get) + return -EINVAL; + + return outp->func->dp.mst_id_get(outp, &args->v0.id); +} + +static int +nvkm_uoutp_mthd_dp_sst(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_sst_args *args = argv; + struct nvkm_disp *disp = outp->disp; + struct nvkm_ior *ior = outp->ior; + + if (argc != sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; - if (!atomic_read(&outp->dp.lt.done)) + if (!ior->func->dp || !nvkm_head_find(disp, args->v0.head)) + return -EINVAL; + if (!ior->func->dp->sst) return 0; - return outp->func->acquire(outp); + return ior->func->dp->sst(ior, args->v0.head, + outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP, + args->v0.watermark, args->v0.hblanksym, args->v0.vblanksym); +} + +static int +nvkm_uoutp_mthd_dp_drive(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_drive_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->dp.drive) + return -EINVAL; + + return outp->func->dp.drive(outp, args->v0.lanes, args->v0.pe, args->v0.vs); +} + +static int +nvkm_uoutp_mthd_dp_train(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_train_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->dp.train) + return -EINVAL; + + if (!args->v0.retrain) { + memcpy(outp->dp.dpcd, args->v0.dpcd, sizeof(outp->dp.dpcd)); + outp->dp.lttprs = args->v0.lttprs; + outp->dp.lt.nr = args->v0.link_nr; + outp->dp.lt.bw = args->v0.link_bw / 27000; + outp->dp.lt.mst = args->v0.mst; + outp->dp.lt.post_adj = args->v0.post_lt_adj; + } + + return outp->func->dp.train(outp, args->v0.retrain); +} + +static int +nvkm_uoutp_mthd_dp_rates(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_rates_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (args->v0.rates > ARRAY_SIZE(outp->dp.rate)) + return -EINVAL; + + for (int i = 0; i < args->v0.rates; i++) { + outp->dp.rate[i].dpcd = args->v0.rate[i].dpcd; + outp->dp.rate[i].rate = args->v0.rate[i].rate; + } + + outp->dp.rates = 
args->v0.rates; + + if (outp->func->dp.rates) + outp->func->dp.rates(outp); + + return 0; +} + +static int +nvkm_uoutp_mthd_dp_aux_xfer(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_dp_aux_xfer_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->dp.aux_xfer) + return -EINVAL; + + return outp->func->dp.aux_xfer(outp, args->v0.type, args->v0.addr, + args->v0.data, &args->v0.size); } static int @@ -64,10 +170,10 @@ nvkm_uoutp_mthd_dp_aux_pwr(struct nvkm_outp *outp, void *argv, u32 argc) if (argc != sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; + if (!outp->func->dp.aux_pwr) + return -EINVAL; - outp->dp.enabled = !!args->v0.state; - nvkm_dp_enable(outp, outp->dp.enabled); - return 0; + return outp->func->dp.aux_pwr(outp, !!args->v0.state); } static int @@ -88,12 +194,20 @@ nvkm_uoutp_mthd_hda_eld(struct nvkm_outp *outp, void *argv, u32 argc) if (argc && args->v0.data[0]) { if (outp->info.type == DCB_OUTPUT_DP) ior->func->dp->audio(ior, args->v0.head, true); + else + if (ior->func->hdmi->audio) + ior->func->hdmi->audio(ior, args->v0.head, true); + ior->func->hda->hpd(ior, args->v0.head, true); ior->func->hda->eld(ior, args->v0.head, args->v0.data, argc); } else { + ior->func->hda->hpd(ior, args->v0.head, false); + if (outp->info.type == DCB_OUTPUT_DP) ior->func->dp->audio(ior, args->v0.head, false); - ior->func->hda->hpd(ior, args->v0.head, false); + else + if (ior->func->hdmi->audio) + ior->func->hdmi->audio(ior, args->v0.head, false); } return 0; @@ -126,84 +240,105 @@ nvkm_uoutp_mthd_infoframe(struct nvkm_outp *outp, void *argv, u32 argc) } static int -nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc) +nvkm_uoutp_mthd_hdmi(struct nvkm_outp *outp, void *argv, u32 argc) { - struct nvkm_head *head = outp->asy.head; + union nvif_outp_hdmi_args *args = argv; struct nvkm_ior *ior = outp->ior; - union nvif_outp_release_args *args = argv; - if (argc != sizeof(args->vn)) + if (argc != sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; - if (ior->func->hdmi && head) { - ior->func->hdmi->infoframe_avi(ior, head->id, NULL, 0); - ior->func->hdmi->infoframe_vsi(ior, head->id, NULL, 0); - ior->func->hdmi->ctrl(ior, head->id, false, 0, 0); + if (!(outp->asy.head = nvkm_head_find(outp->disp, args->v0.head))) + return -EINVAL; + + if (!ior->func->hdmi || + args->v0.max_ac_packet > 0x1f || + args->v0.rekey > 0x7f || + (args->v0.scdc && !ior->func->hdmi->scdc)) + return -EINVAL; + + if (!args->v0.enable) { + ior->func->hdmi->infoframe_avi(ior, args->v0.head, NULL, 0); + ior->func->hdmi->infoframe_vsi(ior, args->v0.head, NULL, 0); + ior->func->hdmi->ctrl(ior, args->v0.head, false, 0, 0); + return 0; } - nvkm_outp_release(outp, NVKM_OUTP_USER); + ior->func->hdmi->ctrl(ior, args->v0.head, args->v0.enable, + args->v0.max_ac_packet, args->v0.rekey); + if (ior->func->hdmi->scdc) + ior->func->hdmi->scdc(ior, args->v0.khz, args->v0.scdc, args->v0.scdc_scrambling, + args->v0.scdc_low_rates); + return 0; } static int -nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE], - u8 link_nr, u8 link_bw, bool hda, bool mst) +nvkm_uoutp_mthd_lvds(struct nvkm_outp *outp, void *argv, u32 argc) { - int ret; + union nvif_outp_lvds_args *args = argv; - ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hda); - if (ret) - return ret; + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (outp->info.type != DCB_OUTPUT_LVDS) + return -EINVAL; - memcpy(outp->dp.dpcd, 
dpcd, sizeof(outp->dp.dpcd)); - outp->dp.lt.nr = link_nr; - outp->dp.lt.bw = link_bw; - outp->dp.lt.mst = mst; + outp->lvds.dual = !!args->v0.dual; + outp->lvds.bpc8 = !!args->v0.bpc8; return 0; } static int -nvkm_uoutp_mthd_acquire_tmds(struct nvkm_outp *outp, u8 head, u8 hdmi, u8 hdmi_max_ac_packet, - u8 hdmi_rekey, u8 hdmi_scdc, u8 hdmi_hda) +nvkm_uoutp_mthd_bl_set(struct nvkm_outp *outp, void *argv, u32 argc) { - struct nvkm_ior *ior; + union nvif_outp_bl_get_args *args = argv; int ret; - if (!(outp->asy.head = nvkm_head_find(outp->disp, head))) - return -EINVAL; + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; - ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hdmi && hdmi_hda); - if (ret) - return ret; + if (outp->func->bl.set) + ret = outp->func->bl.set(outp, args->v0.level); + else + ret = -EINVAL; - ior = outp->ior; + return ret; +} - if (hdmi) { - if (!ior->func->hdmi || - hdmi_max_ac_packet > 0x1f || hdmi_rekey > 0x7f || - (hdmi_scdc && !ior->func->hdmi->scdc)) { - nvkm_outp_release(outp, NVKM_OUTP_USER); - return -EINVAL; - } +static int +nvkm_uoutp_mthd_bl_get(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_bl_get_args *args = argv; + int ret; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; - ior->func->hdmi->ctrl(ior, head, hdmi, hdmi_max_ac_packet, hdmi_rekey); - if (ior->func->hdmi->scdc) - ior->func->hdmi->scdc(ior, hdmi_scdc); + if (outp->func->bl.get) { + ret = outp->func->bl.get(outp); + if (ret >= 0) { + args->v0.level = ret; + ret = 0; + } + } else { + ret = -EINVAL; } - return 0; + return ret; } static int -nvkm_uoutp_mthd_acquire_lvds(struct nvkm_outp *outp, bool dual, bool bpc8) +nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc) { - if (outp->info.type != DCB_OUTPUT_LVDS) - return -EINVAL; + union nvif_outp_release_args *args = argv; - outp->lvds.dual = dual; - outp->lvds.bpc8 = bpc8; + if (argc != sizeof(args->vn)) + return -ENOSYS; + if (!outp->ior) + return -EINVAL; - return nvkm_outp_acquire(outp, NVKM_OUTP_USER, false); + outp->func->release(outp); + return 0; } static int @@ -214,30 +349,16 @@ nvkm_uoutp_mthd_acquire(struct nvkm_outp *outp, void *argv, u32 argc) if (argc != sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; - if (outp->ior) + if (outp->ior && args->v0.type <= NVIF_OUTP_ACQUIRE_V0_PIOR) return -EBUSY; - switch (args->v0.proto) { - case NVIF_OUTP_ACQUIRE_V0_RGB_CRT: - ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, false); + switch (args->v0.type) { + case NVIF_OUTP_ACQUIRE_V0_DAC: + case NVIF_OUTP_ACQUIRE_V0_PIOR: + ret = outp->func->acquire(outp, false); break; - case NVIF_OUTP_ACQUIRE_V0_TMDS: - ret = nvkm_uoutp_mthd_acquire_tmds(outp, args->v0.tmds.head, - args->v0.tmds.hdmi, - args->v0.tmds.hdmi_max_ac_packet, - args->v0.tmds.hdmi_rekey, - args->v0.tmds.hdmi_scdc, - args->v0.tmds.hdmi_hda); - break; - case NVIF_OUTP_ACQUIRE_V0_LVDS: - ret = nvkm_uoutp_mthd_acquire_lvds(outp, args->v0.lvds.dual, args->v0.lvds.bpc8); - break; - case NVIF_OUTP_ACQUIRE_V0_DP: - ret = nvkm_uoutp_mthd_acquire_dp(outp, args->v0.dp.dpcd, - args->v0.dp.link_nr, - args->v0.dp.link_bw, - args->v0.dp.hda != 0, - args->v0.dp.mst != 0); + case NVIF_OUTP_ACQUIRE_V0_SOR: + ret = outp->func->acquire(outp, args->v0.sor.hda); break; default: ret = -EINVAL; @@ -253,6 +374,69 @@ nvkm_uoutp_mthd_acquire(struct nvkm_outp *outp, void *argv, u32 argc) } static int +nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_inherit_args *args = argv; + 
struct nvkm_ior *ior; + int ret = 0; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + + /* Ensure an ior is hooked up to this outp already */ + ior = outp->func->inherit(outp); + if (!ior || !ior->arm.head) + return -ENODEV; + + /* With iors, there will be a separate output path for each type of connector - and all of + * them will appear to be hooked up. Figure out which one is actually the one we're using + * based on the protocol we were given over nvif + */ + switch (args->v0.proto) { + case NVIF_OUTP_INHERIT_V0_TMDS: + if (ior->arm.proto != TMDS) + return -ENODEV; + break; + case NVIF_OUTP_INHERIT_V0_DP: + if (ior->arm.proto != DP) + return -ENODEV; + break; + case NVIF_OUTP_INHERIT_V0_LVDS: + if (ior->arm.proto != LVDS) + return -ENODEV; + break; + case NVIF_OUTP_INHERIT_V0_TV: + if (ior->arm.proto != TV) + return -ENODEV; + break; + case NVIF_OUTP_INHERIT_V0_RGB_CRT: + if (ior->arm.proto != CRT) + return -ENODEV; + break; + default: + ret = -EINVAL; + break; + } + + /* Make sure that userspace hasn't already acquired this */ + if (outp->acquired) { + OUTP_ERR(outp, "cannot inherit an already acquired (%02x) outp", outp->acquired); + return -EBUSY; + } + + /* Mark the outp acquired by userspace now that we've confirmed it's already active */ + OUTP_TRACE(outp, "inherit %02x |= %02x %p", outp->acquired, NVKM_OUTP_USER, ior); + nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior); + + args->v0.or = ior->id; + args->v0.link = ior->arm.link; + args->v0.head = ffs(ior->arm.head) - 1; + args->v0.proto = ior->arm.proto_evo; + + return ret; +} + +static int nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc) { union nvif_outp_load_detect_args *args = argv; @@ -261,7 +445,7 @@ nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc) if (argc != sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; - ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false); + ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false); if (ret == 0) { if (outp->ior->func->sense) { ret = outp->ior->func->sense(outp->ior, args->v0.data); @@ -269,21 +453,64 @@ nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc) } else { ret = -EINVAL; } - nvkm_outp_release(outp, NVKM_OUTP_PRIV); + nvkm_outp_release_or(outp, NVKM_OUTP_PRIV); } return ret; } static int +nvkm_uoutp_mthd_edid_get(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_edid_get_args *args = argv; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->edid_get) + return -EINVAL; + + args->v0.size = ARRAY_SIZE(args->v0.data); + return outp->func->edid_get(outp, args->v0.data, &args->v0.size); +} + +static int +nvkm_uoutp_mthd_detect(struct nvkm_outp *outp, void *argv, u32 argc) +{ + union nvif_outp_detect_args *args = argv; + int ret; + + if (argc != sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + if (!outp->func->detect) + return -EINVAL; + + ret = outp->func->detect(outp); + switch (ret) { + case 0: args->v0.status = NVIF_OUTP_DETECT_V0_NOT_PRESENT; break; + case 1: args->v0.status = NVIF_OUTP_DETECT_V0_PRESENT; break; + default: + args->v0.status = NVIF_OUTP_DETECT_V0_UNKNOWN; + break; + } + + return 0; +} + +static int nvkm_uoutp_mthd_acquired(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc) { switch (mthd) { - case NVIF_OUTP_V0_RELEASE : return nvkm_uoutp_mthd_release (outp, argv, argc); - case NVIF_OUTP_V0_INFOFRAME : return nvkm_uoutp_mthd_infoframe (outp, argv, argc); - case 
NVIF_OUTP_V0_HDA_ELD : return nvkm_uoutp_mthd_hda_eld (outp, argv, argc); - case NVIF_OUTP_V0_DP_RETRAIN : return nvkm_uoutp_mthd_dp_retrain (outp, argv, argc); - case NVIF_OUTP_V0_DP_MST_VCPI: return nvkm_uoutp_mthd_dp_mst_vcpi(outp, argv, argc); + case NVIF_OUTP_V0_RELEASE : return nvkm_uoutp_mthd_release (outp, argv, argc); + case NVIF_OUTP_V0_LVDS : return nvkm_uoutp_mthd_lvds (outp, argv, argc); + case NVIF_OUTP_V0_HDMI : return nvkm_uoutp_mthd_hdmi (outp, argv, argc); + case NVIF_OUTP_V0_INFOFRAME : return nvkm_uoutp_mthd_infoframe (outp, argv, argc); + case NVIF_OUTP_V0_HDA_ELD : return nvkm_uoutp_mthd_hda_eld (outp, argv, argc); + case NVIF_OUTP_V0_DP_TRAIN : return nvkm_uoutp_mthd_dp_train (outp, argv, argc); + case NVIF_OUTP_V0_DP_DRIVE : return nvkm_uoutp_mthd_dp_drive (outp, argv, argc); + case NVIF_OUTP_V0_DP_SST : return nvkm_uoutp_mthd_dp_sst (outp, argv, argc); + case NVIF_OUTP_V0_DP_MST_ID_GET: return nvkm_uoutp_mthd_dp_mst_id_get(outp, argv, argc); + case NVIF_OUTP_V0_DP_MST_ID_PUT: return nvkm_uoutp_mthd_dp_mst_id_put(outp, argv, argc); + case NVIF_OUTP_V0_DP_MST_VCPI : return nvkm_uoutp_mthd_dp_mst_vcpi (outp, argv, argc); default: break; } @@ -292,17 +519,25 @@ nvkm_uoutp_mthd_acquired(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc) } static int -nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc) +nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc, bool *invalid) { switch (mthd) { - case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc); + case NVIF_OUTP_V0_DETECT : return nvkm_uoutp_mthd_detect (outp, argv, argc); + case NVIF_OUTP_V0_EDID_GET : return nvkm_uoutp_mthd_edid_get (outp, argv, argc); + case NVIF_OUTP_V0_INHERIT : return nvkm_uoutp_mthd_inherit (outp, argv, argc); case NVIF_OUTP_V0_ACQUIRE : return nvkm_uoutp_mthd_acquire (outp, argv, argc); + case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc); + case NVIF_OUTP_V0_BL_GET : return nvkm_uoutp_mthd_bl_get (outp, argv, argc); + case NVIF_OUTP_V0_BL_SET : return nvkm_uoutp_mthd_bl_set (outp, argv, argc); case NVIF_OUTP_V0_DP_AUX_PWR : return nvkm_uoutp_mthd_dp_aux_pwr (outp, argv, argc); + case NVIF_OUTP_V0_DP_AUX_XFER: return nvkm_uoutp_mthd_dp_aux_xfer(outp, argv, argc); + case NVIF_OUTP_V0_DP_RATES : return nvkm_uoutp_mthd_dp_rates (outp, argv, argc); default: break; } - return 1; + *invalid = true; + return 0; } static int @@ -310,12 +545,13 @@ nvkm_uoutp_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) { struct nvkm_outp *outp = nvkm_uoutp(object); struct nvkm_disp *disp = outp->disp; + bool invalid = false; int ret; mutex_lock(&disp->super.mutex); - ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc); - if (ret <= 0) + ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc, &invalid); + if (!invalid) goto done; if (outp->ior) @@ -370,10 +606,60 @@ nvkm_uoutp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nv ret = -EBUSY; spin_lock(&disp->client.lock); if (!outp->object.func) { + switch (outp->info.type) { + case DCB_OUTPUT_ANALOG: + args->v0.type = NVIF_OUTP_V0_TYPE_DAC; + args->v0.proto = NVIF_OUTP_V0_PROTO_RGB_CRT; + args->v0.rgb_crt.freq_max = outp->info.crtconf.maxfreq; + break; + case DCB_OUTPUT_TMDS: + if (!outp->info.location) { + args->v0.type = NVIF_OUTP_V0_TYPE_SOR; + args->v0.tmds.dual = (outp->info.tmdsconf.sor.link == 3); + } else { + args->v0.type = NVIF_OUTP_V0_TYPE_PIOR; + args->v0.tmds.dual = 0; + } + args->v0.proto = 
NVIF_OUTP_V0_PROTO_TMDS; + break; + case DCB_OUTPUT_LVDS: + args->v0.type = NVIF_OUTP_V0_TYPE_SOR; + args->v0.proto = NVIF_OUTP_V0_PROTO_LVDS; + args->v0.lvds.acpi_edid = outp->info.lvdsconf.use_acpi_for_edid; + break; + case DCB_OUTPUT_DP: + if (!outp->info.location) { + args->v0.type = NVIF_OUTP_V0_TYPE_SOR; + args->v0.dp.aux = outp->info.i2c_index; + } else { + args->v0.type = NVIF_OUTP_V0_TYPE_PIOR; + args->v0.dp.aux = NVKM_I2C_AUX_EXT(outp->info.extdev); + } + args->v0.proto = NVIF_OUTP_V0_PROTO_DP; + args->v0.dp.mst = outp->dp.mst; + args->v0.dp.increased_wm = outp->dp.increased_wm; + args->v0.dp.link_nr = outp->info.dpconf.link_nr; + args->v0.dp.link_bw = outp->info.dpconf.link_bw * 27000; + break; + default: + WARN_ON(1); + ret = -EINVAL; + goto done; + } + + if (outp->info.location) + args->v0.ddc = NVKM_I2C_BUS_EXT(outp->info.extdev); + else + args->v0.ddc = outp->info.i2c_index; + args->v0.heads = outp->info.heads; + args->v0.conn = outp->info.connector; + nvkm_object_ctor(&nvkm_uoutp, oclass, &outp->object); *pobject = &outp->object; ret = 0; } + +done: spin_unlock(&disp->client.lock); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index d619b40a42..fd5ee9f0af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c @@ -318,14 +318,14 @@ nvkm_falcon_init(struct nvkm_engine *engine) } static void * -nvkm_falcon_dtor(struct nvkm_engine *engine) +nvkm_falcon_dtor_engine(struct nvkm_engine *engine) { return nvkm_falcon(engine); } static const struct nvkm_engine_func nvkm_falcon = { - .dtor = nvkm_falcon_dtor, + .dtor = nvkm_falcon_dtor_engine, .oneinit = nvkm_falcon_oneinit, .init = nvkm_falcon_init, .fini = nvkm_falcon_fini, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 5a074b9970..aff92848ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -26,5 +26,7 @@ nvkm-y += nvkm/engine/fifo/tu102.o nvkm-y += nvkm/engine/fifo/ga100.o nvkm-y += nvkm/engine/fifo/ga102.o +nvkm-y += nvkm/engine/fifo/r535.o + nvkm-y += nvkm/engine/fifo/ucgrp.o nvkm-y += nvkm/engine/fifo/uchan.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 5db37247dc..22443fe4a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -210,6 +210,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data) CASE(SEC2 ); CASE(NVDEC ); CASE(NVENC ); + CASE(NVJPG ); + CASE(OFA ); default: WARN_ON(1); break; @@ -347,8 +349,14 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) nvkm_chid_unref(&fifo->cgid); nvkm_chid_unref(&fifo->chid); + mutex_destroy(&fifo->userd.mutex); + nvkm_event_fini(&fifo->nonstall.event); mutex_destroy(&fifo->mutex); + + if (fifo->func->dtor) + fifo->func->dtor(fifo); + return fifo; } @@ -383,5 +391,8 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, spin_lock_init(&fifo->lock); mutex_init(&fifo->mutex); + INIT_LIST_HEAD(&fifo->userd.list); + mutex_init(&fifo->userd.mutex); + return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c index ea53fb3d5d..814db9daa1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c @@ -156,6 +156,9 @@ nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_c atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]); /* Allocate the HW structures. */ + if (engn->func->ctor2) { + ret = engn->func->ctor2(engn, vctx, chan); + } else if (engn->func->bind) { ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst); if (ret == 0 && engn->func->ctor) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index b7c9d6115b..87a62d4ff4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c @@ -275,13 +275,17 @@ nvkm_chan_del(struct nvkm_chan **pchan) nvkm_gpuobj_del(&chan->cache); nvkm_gpuobj_del(&chan->ramfc); - nvkm_memory_unref(&chan->userd.mem); - if (chan->cgrp) { - nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock); + if (!chan->func->id_put) + nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock); + else + chan->func->id_put(chan); + nvkm_cgrp_unref(&chan->cgrp); } + nvkm_memory_unref(&chan->userd.mem); + if (chan->vmm) { nvkm_vmm_part(chan->vmm, chan->inst->memory); nvkm_vmm_unref(&chan->vmm); @@ -438,7 +442,32 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru } /* Allocate channel ID. */ - chan->id = nvkm_chid_get(runl->chid, chan); + if (!chan->func->id_get) { + chan->id = nvkm_chid_get(runl->chid, chan); + if (chan->id >= 0) { + if (func->userd->bar < 0) { + if (ouserd + chan->func->userd->size >= + nvkm_memory_size(userd)) { + RUNL_DEBUG(runl, "ouserd %llx", ouserd); + return -EINVAL; + } + + ret = nvkm_memory_kmap(userd, &chan->userd.mem); + if (ret) { + RUNL_DEBUG(runl, "userd %d", ret); + return ret; + } + + chan->userd.base = ouserd; + } else { + chan->userd.mem = nvkm_memory_ref(fifo->userd.mem); + chan->userd.base = chan->id * chan->func->userd->size; + } + } + } else { + chan->id = chan->func->id_get(chan, userd, ouserd); + } + if (chan->id < 0) { RUNL_ERROR(runl, "!chids"); return -ENOSPC; @@ -448,24 +477,6 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru cgrp->id = chan->id; /* Initialise USERD. 
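With the id_get/id_put hooks above, USERD may already have been mapped during channel-ID allocation, so only the optional clear below remains common to both paths.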
*/ - if (func->userd->bar < 0) { - if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) { - RUNL_DEBUG(runl, "ouserd %llx", ouserd); - return -EINVAL; - } - - ret = nvkm_memory_kmap(userd, &chan->userd.mem); - if (ret) { - RUNL_DEBUG(runl, "userd %d", ret); - return ret; - } - - chan->userd.base = ouserd; - } else { - chan->userd.mem = nvkm_memory_ref(fifo->userd.mem); - chan->userd.base = chan->id * chan->func->userd->size; - } - if (chan->func->userd->clear) chan->func->userd->clear(chan); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 85b94f6991..013682a709 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h @@ -17,6 +17,9 @@ struct nvkm_cctx { }; struct nvkm_chan_func { + int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd); + void (*id_put)(struct nvkm_chan *); + const struct nvkm_chan_func_inst { u32 size; bool zero; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c index c56d2a839e..e74493a456 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c @@ -27,6 +27,7 @@ #include "runq.h" #include <core/gpuobj.h> +#include <subdev/gsp.h> #include <subdev/top.h> #include <subdev/vfn.h> @@ -549,6 +550,10 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo) struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head); runl->nonstall.vector = engn->func->nonstall(engn); + + /* if no nonstall vector just keep going */ + if (runl->nonstall.vector == -1) + continue; if (runl->nonstall.vector < 0) { RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector); return runl->nonstall.vector; @@ -607,5 +612,8 @@ int ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fifo_new(&ga100_fifo, device, type, inst, pfifo); + return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c index 2cdf5da339..755235f55b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + #include <nvif/class.h> static const struct nvkm_fifo_func @@ -34,12 +36,15 @@ ga102_fifo = { .engn = &ga100_engn, .engn_ce = &ga100_engn_ce, .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true }, - .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan }, + .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan }, }; int ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fifo_new(&ga102_fifo, device, type, inst, pfifo); + return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index 4d448be192..a0f3277605 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h @@ -13,6 +13,8 @@ struct nvkm_runq; struct nvkm_vctx; struct nvkm_fifo_func { + void (*dtor)(struct nvkm_fifo *); + int (*chid_nr)(struct nvkm_fifo *); int (*chid_ctor)(struct nvkm_fifo *, int nr); int (*runq_nr)(struct nvkm_fifo *); @@ -58,6 +60,8 @@ 
struct nvkm_fifo_func { } chan; }; +int r535_fifo_new(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_fifo **); int nvkm_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_fifo **); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c new file mode 100644 index 0000000000..3454c7d295 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c @@ -0,0 +1,665 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include "cgrp.h" +#include "chan.h" +#include "chid.h" +#include "runl.h" + +#include <core/gpuobj.h> +#include <subdev/gsp.h> +#include <subdev/mmu.h> +#include <subdev/vfn.h> +#include <engine/gr.h> + +#include <nvhw/drf.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h> +#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h> +#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h> + +static u32 +r535_chan_doorbell_handle(struct nvkm_chan *chan) +{ + return (chan->cgrp->runl->id << 16) | chan->id; +} + +static void +r535_chan_stop(struct nvkm_chan *chan) +{ +} + +static void +r535_chan_start(struct nvkm_chan *chan) +{ +} + +static void +r535_chan_ramfc_clear(struct nvkm_chan *chan) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + + nvkm_gsp_rm_free(&chan->rm.object); + + dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size, + chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr); + + nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx); +} + +#define CHID_PER_USERD 8 + +static int +r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + struct nvkm_engn *engn; + struct nvkm_device *device = fifo->engine.subdev.device; + 
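/* Editor's note, descriptive only: under GSP-RM the kernel no longer writes RAMFC directly; the parameters filled in below describe the instance, USERD, RAMFC and method-buffer memory so RM can construct the channel on the kernel's behalf. */ +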
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + const int userd_p = chan->id / CHID_PER_USERD; + const int userd_i = chan->id % CHID_PER_USERD; + u32 eT = ~0; + int ret; + + if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { + ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); + if (ret) + return ret; + } + + nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { + eT = engn->id; + break; + } + + if (WARN_ON(eT == ~0)) + return -EINVAL; + + chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, + fifo->rm.mthdbuf_size, + &chan->rm.mthdbuf.addr, GFP_KERNEL); + if (!chan->rm.mthdbuf.ptr) + return -ENOMEM; + + args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id, + fifo->func->chan.user.oclass, sizeof(*args), + &chan->rm.object); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->gpFifoOffset = offset; + args->gpFifoEntries = length / 8; + + args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); + args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq); + if (!priv) + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); + else + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE); + args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE); + + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE); + + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT); + args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + + args->hVASpace = chan->vmm->rm.object.handle; + args->engineType = eT; + + args->instanceMem.base = chan->inst->addr; + args->instanceMem.size = chan->inst->size; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + + args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; + args->userdMem.size = fifo->func->chan.func->userd->size; + args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + + args->ramfcMem.base = chan->inst->addr + 0; + args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + + args->mthdbufMem.base = chan->rm.mthdbuf.addr; + args->mthdbufMem.size = fifo->rm.mthdbuf_size; + args->mthdbufMem.addressSpace = 1; + args->mthdbufMem.cacheAttrib = 0; + + if (!priv) + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER); + else + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); + args->internalFlags |= 
NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + if (ret) + return ret; + + if (1) { + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl; + + if (1) { + NVA06F_CTRL_BIND_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, + NVA06F_CTRL_CMD_BIND, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->engineType = eT; + + ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); + if (ret) + return ret; + } + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->bEnable = 1; + ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); + } + + return ret; +} + +static const struct nvkm_chan_func_ramfc +r535_chan_ramfc = { + .write = r535_chan_ramfc_write, + .clear = r535_chan_ramfc_clear, + .devm = 0xfff, + .priv = true, +}; + +struct r535_chan_userd { + struct nvkm_memory *mem; + struct nvkm_memory *map; + int chid; + u32 used; + + struct list_head head; +} *userd; + +static void +r535_chan_id_put(struct nvkm_chan *chan) +{ + struct nvkm_runl *runl = chan->cgrp->runl; + struct nvkm_fifo *fifo = runl->fifo; + struct r535_chan_userd *userd; + + mutex_lock(&fifo->userd.mutex); + list_for_each_entry(userd, &fifo->userd.list, head) { + if (userd->map == chan->userd.mem) { + u32 chid = chan->userd.base / chan->func->userd->size; + + userd->used &= ~BIT(chid); + if (!userd->used) { + nvkm_memory_unref(&userd->map); + nvkm_memory_unref(&userd->mem); + nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); + list_del(&userd->head); + kfree(userd); + } + + break; + } + } + mutex_unlock(&fifo->userd.mutex); + +} + +static int +r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) +{ + const u32 userd_size = CHID_PER_USERD * chan->func->userd->size; + struct nvkm_runl *runl = chan->cgrp->runl; + struct nvkm_fifo *fifo = runl->fifo; + struct r535_chan_userd *userd; + u32 chid; + int ret; + + if (ouserd + chan->func->userd->size >= userd_size || + (ouserd & (chan->func->userd->size - 1))) { + RUNL_DEBUG(runl, "ouserd %llx", ouserd); + return -EINVAL; + } + + chid = div_u64(ouserd, chan->func->userd->size); + + list_for_each_entry(userd, &fifo->userd.list, head) { + if (userd->mem == muserd) { + if (userd->used & BIT(chid)) + return -EBUSY; + break; + } + } + + if (&userd->head == &fifo->userd.list) { + if (nvkm_memory_size(muserd) < userd_size) { + RUNL_DEBUG(runl, "userd too small"); + return -EINVAL; + } + + userd = kzalloc(sizeof(*userd), GFP_KERNEL); + if (!userd) + return -ENOMEM; + + userd->chid = nvkm_chid_get(runl->chid, chan); + if (userd->chid < 0) { + ret = userd->chid; + kfree(userd); + return ret; + } + + userd->mem = nvkm_memory_ref(muserd); + + ret = nvkm_memory_kmap(userd->mem, &userd->map); + if (ret) { + nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); + kfree(userd); + return ret; + } + + + list_add(&userd->head, &fifo->userd.list); + } + + userd->used |= BIT(chid); + + chan->userd.mem = nvkm_memory_ref(userd->map); + chan->userd.base = ouserd; + + return (userd->chid * CHID_PER_USERD) + chid; +} + +static int +r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + int ret; + + mutex_lock(&fifo->userd.mutex); + ret = r535_chan_id_get_locked(chan, muserd, ouserd); + mutex_unlock(&fifo->userd.mutex); + return ret; +} + +static const struct nvkm_chan_func 
+r535_chan = { + .id_get = r535_chan_id_get, + .id_put = r535_chan_id_put, + .inst = &gf100_chan_inst, + .userd = &gv100_chan_userd, + .ramfc = &r535_chan_ramfc, + .start = r535_chan_start, + .stop = r535_chan_stop, + .doorbell_handle = r535_chan_doorbell_handle, +}; + +static const struct nvkm_cgrp_func +r535_cgrp = { +}; + +static int +r535_engn_nonstall(struct nvkm_engn *engn) +{ + struct nvkm_subdev *subdev = &engn->engine->subdev; + int ret; + + ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst); + WARN_ON(ret == -ENOENT); + return ret; +} + +static const struct nvkm_engn_func +r535_ce = { + .nonstall = r535_engn_nonstall, +}; + +static int +r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + /* RM requires GR context buffers to remain mapped until after the + * channel has been destroyed (as opposed to after the last gr obj + * has been deleted). + * + * Take an extra ref here, which will be released once the channel + * object has been deleted. + */ + refcount_inc(&vctx->refs); + chan->rm.grctx = vctx; + return 0; +} + +static const struct nvkm_engn_func +r535_gr = { + .nonstall = r535_engn_nonstall, + .ctor2 = r535_gr_ctor, +}; + +static int +r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + struct nvkm_gsp_client *client = &chan->vmm->rm.client; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->hClient = client->object.handle; + ctrl->hObject = chan->rm.object.handle; + ctrl->hChanClient = client->object.handle; + ctrl->virtAddress = vctx->vma->addr; + ctrl->size = vctx->inst->size; + ctrl->engineType = engn->id; + ctrl->ChID = chan->id; + + return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl); +} + +static int +r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + int ret; + + if (WARN_ON(!engn->rm.size)) + return -EINVAL; + + ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL, + &vctx->inst); + if (ret) + return ret; + + ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma); + if (ret) + return ret; + + ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0); + if (ret) + return ret; + + return r535_flcn_bind(engn, vctx, chan); +} + +static const struct nvkm_engn_func +r535_flcn = { + .nonstall = r535_engn_nonstall, + .ctor2 = r535_flcn_ctor, +}; + +static void +r535_runl_allow(struct nvkm_runl *runl, u32 engm) +{ +} + +static void +r535_runl_block(struct nvkm_runl *runl, u32 engm) +{ +} + +static const struct nvkm_runl_func +r535_runl = { + .block = r535_runl_block, + .allow = r535_runl_allow, +}; + +static int +r535_fifo_2080_type(enum nvkm_subdev_type type, int inst) +{ + switch (type) { + case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0; + case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst; + case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2; + case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst; + case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst; + case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst; + case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA; + case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW; + default: + break; + } + + WARN_ON(1); + return -EINVAL; +} + +static int +r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype) +{ + 
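/* Translate an RM_ENGINE_TYPE into the NVKM subdev type via *ptype; the return value is the engine instance, or -EINVAL for types with no NVKM mapping. */ +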
switch (rm) { + case RM_ENGINE_TYPE_GR0: + *ptype = NVKM_ENGINE_GR; + return 0; + case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9: + *ptype = NVKM_ENGINE_CE; + return rm - RM_ENGINE_TYPE_COPY0; + case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + return rm - RM_ENGINE_TYPE_NVDEC0; + case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2: + *ptype = NVKM_ENGINE_NVENC; + return rm - RM_ENGINE_TYPE_NVENC0; + case RM_ENGINE_TYPE_SW: + *ptype = NVKM_ENGINE_SW; + return 0; + case RM_ENGINE_TYPE_SEC2: + *ptype = NVKM_ENGINE_SEC2; + return 0; + case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + return rm - RM_ENGINE_TYPE_NVJPEG0; + case RM_ENGINE_TYPE_OFA: + *ptype = NVKM_ENGINE_OFA; + return 0; + default: + return -EINVAL; + } +} + +static int +r535_fifo_ectx_size(struct nvkm_fifo *fifo) +{ + NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn *engn; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO, + sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numConstructedFalcons; i++) { + nvkm_runl_foreach(runl, fifo) { + nvkm_runl_foreach_engn(engn, runl) { + if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) { + engn->rm.size = + ctrl->constructedFalconsTable[i].ctxBufferSize; + break; + } + } + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r535_fifo_runl_ctor(struct nvkm_fifo *fifo) +{ + struct nvkm_subdev *subdev = &fifo->engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn *engn; + u32 cgids = 2048; + u32 chids = 2048; + int ret; + NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; + + if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) || + (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid))) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numEntries; i++) { + const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; + const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; + + runl = nvkm_runl_get(fifo, id, addr); + if (!runl) { + runl = nvkm_runl_new(fifo, id, addr, 0); + if (WARN_ON(IS_ERR(runl))) + continue; + } + } + + for (int i = 0; i < ctrl->numEntries; i++) { + const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; + const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE]; + const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; + enum nvkm_subdev_type type; + int inst, nv2080; + + runl = nvkm_runl_get(fifo, id, addr); + if (!runl) + continue; + + inst = r535_fifo_engn_type(rmid, &type); + if (inst < 0) { + nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid); + nvkm_runl_del(runl); + continue; + } + + nv2080 = r535_fifo_2080_type(type, inst); + if (nv2080 < 0) { + nvkm_runl_del(runl); + continue; + } + + switch (type) { + case NVKM_ENGINE_CE: + engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst); + break; + case NVKM_ENGINE_GR: + engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); + break; + case NVKM_ENGINE_NVDEC: + case NVKM_ENGINE_NVENC: + 
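/* NVDEC/NVENC/NVJPG/OFA all take the generic falcon context path */ +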
case NVKM_ENGINE_NVJPG: + case NVKM_ENGINE_OFA: + engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst); + break; + case NVKM_ENGINE_SW: + continue; + default: + engn = NULL; + break; + } + + if (!engn) { + nvkm_runl_del(runl); + continue; + } + + engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC]; + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + + { + NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + fifo->rm.mthdbuf_size = ctrl->size; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + } + + return r535_fifo_ectx_size(fifo); +} + +static void +r535_fifo_dtor(struct nvkm_fifo *fifo) +{ + kfree(fifo->func); +} + +int +r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) +{ + struct nvkm_fifo_func *rm; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_fifo_dtor; + rm->runl_ctor = r535_fifo_runl_ctor; + rm->runl = &r535_runl; + rm->cgrp = hw->cgrp; + rm->cgrp.func = &r535_cgrp; + rm->chan = hw->chan; + rm->chan.func = &r535_chan; + rm->nonstall = &ga100_fifo_nonstall; + rm->nonstall_ctor = ga100_fifo_nonstall_ctor; + + return nvkm_fifo_new_(rm, device, type, inst, pfifo); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h index 5421321f8e..19e6772ead 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h @@ -18,6 +18,7 @@ struct nvkm_engn { bool (*mmu_fault_triggered)(struct nvkm_engn *); int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *); void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *); + int (*ctor2)(struct nvkm_engn *, struct nvkm_vctx *, struct nvkm_chan *); int (*ramht_add)(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *); void (*ramht_del)(struct nvkm_chan *, int hash); } *func; @@ -28,6 +29,11 @@ struct nvkm_engn { int fault; + struct { + u32 desc; + u32 size; + } rm; + struct list_head head; }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c index ea9e151dbb..1d39a6840a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c @@ -25,6 +25,7 @@ #include "runl.h" #include <core/memory.h> +#include <subdev/gsp.h> #include <subdev/mc.h> #include <subdev/vfn.h> @@ -282,5 +283,8 @@ int tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fifo_new(&tu102_fifo, device, type, inst, pfifo); + return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c index 04140e0110..9e56bcc166 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c @@ -317,6 +317,15 @@ nvkm_uchan = { .uevent = nvkm_uchan_uevent, }; +struct nvkm_chan * +nvkm_uchan_chan(struct nvkm_object *object) +{ + if (WARN_ON(object->func != &nvkm_uchan)) + return NULL; + + return nvkm_uchan(object)->chan; +} + int nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass 
*oclass, void *argv, u32 argc, struct nvkm_object **pobject) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index b5418f05cc..1555f8c40b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -41,6 +41,9 @@ nvkm-y += nvkm/engine/gr/gp10b.o nvkm-y += nvkm/engine/gr/gv100.o nvkm-y += nvkm/engine/gr/tu102.o nvkm-y += nvkm/engine/gr/ga102.o +nvkm-y += nvkm/engine/gr/ad102.o + +nvkm-y += nvkm/engine/gr/r535.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c new file mode 100644 index 0000000000..7bfa6240d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c @@ -0,0 +1,46 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
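The r535 fifo code above is the template for GSP-RM support throughout this series: rather than defining another static function table, r535_fifo_new() heap-allocates an nvkm_fifo_func, inherits the parts of the hardware description it still needs (cgrp, chan), and overrides the entry points that must be routed through the RM; the paired r535_fifo_dtor() frees the table again. Note also the case-range mapping in r535_fifo_engn_type(), which collapses per-instance RM engine IDs into an NVKM engine type plus an instance index (the return value). A minimal userspace sketch of the copy-and-override idiom — all names here (ops, rm_ops_new) are illustrative, not the driver's actual types:

#include <stdio.h>
#include <stdlib.h>

struct ops {
        void (*dtor)(struct ops *);
        int (*runl_ctor)(void);
};

static int hw_runl_ctor(void) { return 0; }
static int rm_runl_ctor(void) { return 0; }     /* would go via GSP-RM */

static void
rm_dtor(struct ops *ops)
{
        free(ops);      /* the RM table was heap-allocated, so free it */
}

static const struct ops hw_ops = { .runl_ctor = hw_runl_ctor };

static struct ops *
rm_ops_new(const struct ops *hw)
{
        struct ops *rm = malloc(sizeof(*rm));

        if (!rm)
                return NULL;

        *rm = *hw;              /* inherit the hardware defaults... */
        rm->dtor = rm_dtor;     /* ...then override what must go via RM */
        rm->runl_ctor = rm_runl_ctor;
        return rm;
}

int
main(void)
{
        struct ops *ops = rm_ops_new(&hw_ops);

        if (!ops)
                return 1;
        printf("runl_ctor -> %d\n", ops->runl_ctor());
        ops->dtor(ops);
        return 0;
}

The same shape recurs below in r535_gr_new(), r535_nvdec_new(), r535_nvenc_new(), r535_nvjpg_new() and r535_ofa_new().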
+ */ +#include "gf100.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct gf100_gr_func +ad102_gr = { + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, ADA_A }, + { -1, -1, ADA_COMPUTE_A }, + {} + } +}; + +int +ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_gr_new(&ad102_gr, device, type, inst, pgr); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c index 0096ad401b..f5e68f09df 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c @@ -160,7 +160,11 @@ static int nvkm_gr_init(struct nvkm_engine *engine) { struct nvkm_gr *gr = nvkm_gr(engine); - return gr->func->init(gr); + + if (gr->func->init) + return gr->func->init(gr); + + return 0; } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c index 00cd70abad..d285c597af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c @@ -23,6 +23,7 @@ #include "ctxgf100.h" #include <core/firmware.h> +#include <subdev/gsp.h> #include <subdev/acr.h> #include <subdev/timer.h> #include <subdev/vfn.h> @@ -350,5 +351,8 @@ ga102_gr_fwif[] = { int ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { + if (nvkm_gsp_rm(device->gsp)) + return r535_gr_new(&ga102_gr, device, type, inst, pgr); + return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 3648868bb9..c494a1ff2d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -2032,18 +2032,18 @@ gf100_gr_oneinit(struct nvkm_gr *base) } /* Allocate global context buffers. 
*/ - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->pagepool_size, - 0x100, false, &gr->pagepool); + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST, + gr->func->grctx->pagepool_size, 0x100, false, &gr->pagepool); if (ret) return ret; - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->bundle_size, + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST, gr->func->grctx->bundle_size, 0x100, false, &gr->bundle_cb); if (ret) return ret; - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->attrib_cb_size(gr), - 0x1000, false, &gr->attrib_cb); + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST, + gr->func->grctx->attrib_cb_size(gr), 0x1000, false, &gr->attrib_cb); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 54f686ba39..b0e0c93050 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -445,4 +445,6 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); +int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c new file mode 100644 index 0000000000..f4bed3eb1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c @@ -0,0 +1,508 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
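Two related changes sit in the gr diffs above. First, gf100_gr_oneinit() now allocates its global context buffers (pagepool, bundle CB, attribute CB) with NVKM_MEM_TARGET_INST_SR_LOST — judging by the name, a target for allocations whose contents may be lost across suspend/resume; these buffers hold scratch state the GPU repopulates, so there is presumably no point preserving them. Second, nvkm_gr_init() now treats the init hook as optional, since a GSP-RM-managed gr engine has no host-side init to do. A trivial standalone sketch of that optional-hook dispatch:

#include <stdio.h>

struct gr_ops {
        int (*init)(void);      /* optional: absent when GSP-RM owns init */
};

static int
gr_init(const struct gr_ops *ops)
{
        if (ops->init)
                return ops->init();

        return 0;       /* nothing to do; firmware initialised the engine */
}

int
main(void)
{
        const struct gr_ops rm = { .init = NULL };

        printf("init -> %d\n", gr_init(&rm));
        return 0;
}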
+ */ +#include "gf100.h" + +#include <core/memory.h> +#include <subdev/gsp.h> +#include <subdev/mmu/vmm.h> +#include <engine/fifo/priv.h> + +#include <nvif/if900d.h> + +#include <nvhw/drf.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> +#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h> + +#define r535_gr(p) container_of((p), struct r535_gr, base) + +#define R515_GR_MAX_CTXBUFS 9 + +struct r535_gr { + struct nvkm_gr base; + + struct { + u16 bufferId; + u32 size; + u8 page; + u8 align; + bool global; + bool init; + bool ro; + } ctxbuf[R515_GR_MAX_CTXBUFS]; + int ctxbuf_nr; + + struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr_chan { + struct nvkm_object object; + struct r535_gr *gr; + + struct nvkm_vmm *vmm; + struct nvkm_chan *chan; + + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_gr_obj_dtor(struct nvkm_object *object) +{ + struct r535_gr_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_gr_obj = { + .dtor = r535_gr_obj_dtor, +}; + +static int +r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); + struct r535_gr_obj *obj; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object); + *pobject = &obj->object; + + return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0, + &obj->rm); +} + +static void * +r535_gr_chan_dtor(struct nvkm_object *object) +{ + struct r535_gr_chan *grc = container_of(object, typeof(*grc), object); + struct r535_gr *gr = grc->gr; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + nvkm_vmm_put(grc->vmm, &grc->vma[i]); + nvkm_memory_unref(&grc->mem[i]); + } + + nvkm_vmm_unref(&grc->vmm); + return grc; +} + +static const struct nvkm_object_func +r535_gr_chan = { + .dtor = r535_gr_chan_dtor, +}; + +static int +r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, + struct nvkm_memory **pmem, struct nvkm_vma **pvma, + struct nvkm_gsp_object *chan) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->engineType = 1; + ctrl->hChanClient = vmm->rm.client.object.handle; + ctrl->hObject = chan->handle; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry = + &ctrl->promoteEntry[ctrl->entryCount]; + const bool alloc = golden || !gr->ctxbuf[i].global; + int ret; + + entry->bufferId = gr->ctxbuf[i].bufferId; + entry->bInitialize = gr->ctxbuf[i].init && alloc; + + if (alloc) { + ret = nvkm_memory_new(device, gr->ctxbuf[i].init ? 
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST, + gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page, + gr->ctxbuf[i].init, &pmem[i]); + if (WARN_ON(ret)) + return ret; + + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) + entry->bNonmapped = 1; + } else { + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP) + continue; + + pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]); + } + + if (!entry->bNonmapped) { + struct gf100_vmm_map_v0 args = { + .priv = 1, + .ro = gr->ctxbuf[i].ro, + }; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align, + nvkm_memory_size(pmem[i]), &pvma[i]); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args)); + if (ret) + return ret; + + entry->gpuVirtAddr = pvma[i]->addr; + } + + if (entry->bInitialize) { + entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]); + entry->size = gr->ctxbuf[i].size; + entry->physAttr = 4; + } + + nvkm_debug(subdev, + "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n", + entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size, + entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped); + + ctrl->entryCount++; + } + + return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl); +} + +static int +r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass, + struct nvkm_object **pobject) +{ + struct r535_gr *gr = r535_gr(base); + struct r535_gr_chan *grc; + int ret; + + if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object); + grc->gr = gr; + grc->vmm = nvkm_vmm_ref(chan->vmm); + grc->chan = chan; + *pobject = &grc->object; + + ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object); + if (ret) + return ret; + + return 0; +} + +static u64 +r535_gr_units(struct nvkm_gr *gr) +{ + struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp; + + return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; +} + +static int +r535_gr_oneinit(struct nvkm_gr *base) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_mmu *mmu = device->mmu; + struct { + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + struct nvkm_gsp_object chan; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; + } golden = {}; + int ret; + + /* Allocate a channel to use for golden context init. 
*/ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst); + if (ret) + goto done; + + ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm); + if (ret) + goto done; + + ret = mmu->func->promote_vmm(golden.vmm); + if (ret) + goto done; + + { + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000, + device->fifo->func->chan.user.oclass, + sizeof(*args), &golden.chan); + if (IS_ERR(args)) { + ret = PTR_ERR(args); + goto done; + } + + args->gpFifoOffset = 0; + args->gpFifoEntries = 0x1000 / 8; + args->flags = + NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) | + NVDEF(NVOS04, FLAGS, VPR, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) | + NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) | + NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) | + NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) | + NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) | + NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) | + NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) | + NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) | + NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) | + NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) | + NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) | + NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) | + NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + args->hVASpace = golden.vmm->rm.object.handle; + args->engineType = 1; + args->instanceMem.base = nvkm_memory_addr(golden.inst); + args->instanceMem.size = 0x1000; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + args->ramfcMem.base = nvkm_memory_addr(golden.inst); + args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000; + args->userdMem.size = 0x200; + args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000; + args->mthdbufMem.size = 0x5000; + args->mthdbufMem.addressSpace = 2; + args->mthdbufMem.cacheAttrib = 1; + args->internalFlags = + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) | + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) | + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args); + if (ret) + goto done; + } + + /* Fetch context buffer info from RM and allocate each of them here to use + * during golden context init (or later as a global context buffer). + * + * Also build the information that'll be used to create channel contexts. 
+ */ + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) { + ret = PTR_ERR(info); + goto done; + } + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) { + static const struct { + u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ + u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ + bool global; + bool init; + bool ro; + } map[] = { +#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ + .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ + .global = (G), .init = (I), .ro = (R) } +#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) + /* global init ro */ + _A( GRAPHICS, MAIN, false, true, false), + _B( PATCH, false, true, false), + _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), + _B( PAGEPOOL, true, false, false), + _B( ATTRIBUTE_CB, true, false, false), + _B( RTV_CB_GLOBAL, true, false, false), + _B( FECS_EVENT, true, true, false), + _B( PRIV_ACCESS_MAP, true, true, true), +#undef _B +#undef _A + }; + u32 size = info->engineContextBuffersInfo[0].engine[i].size; + u8 align, page; + int id; + + for (id = 0; id < ARRAY_SIZE(map); id++) { + if (map[id].id0 == i) + break; + } + + nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, + size, (id < ARRAY_SIZE(map)) ? "*" : ""); + if (id >= ARRAY_SIZE(map)) + continue; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) + size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ + + if (size >= 1 << 21) page = 21; + else if (size >= 1 << 16) page = 16; + else page = 12; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) + align = order_base_2(size); + else + align = page; + + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + continue; + + gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; + gr->ctxbuf[gr->ctxbuf_nr].size = size; + gr->ctxbuf[gr->ctxbuf_nr].page = page; + gr->ctxbuf[gr->ctxbuf_nr].align = align; + gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; + gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; + gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; + gr->ctxbuf_nr++; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + continue; + + gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; + gr->ctxbuf[gr->ctxbuf_nr].bufferId = + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; + gr->ctxbuf_nr++; + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + + /* Promote golden context to RM. */ + ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan); + if (ret) + goto done; + + /* Allocate 3D class on channel to trigger golden context init in RM. 
*/ + { + int i; + + for (i = 0; gr->base.func->sclass[i].ctor; i++) { + if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) { + struct nvkm_gsp_object threed; + + ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, + gr->base.func->sclass[i].oclass, 0, + &threed); + if (ret) + goto done; + + nvkm_gsp_rm_free(&threed); + break; + } + } + + if (WARN_ON(!gr->base.func->sclass[i].ctor)) { + ret = -EINVAL; + goto done; + } + } + +done: + nvkm_gsp_rm_free(&golden.chan); + for (int i = gr->ctxbuf_nr - 1; i >= 0; i--) + nvkm_vmm_put(golden.vmm, &golden.vma[i]); + nvkm_vmm_unref(&golden.vmm); + nvkm_memory_unref(&golden.inst); + return ret; + +} + +static void * +r535_gr_dtor(struct nvkm_gr *base) +{ + struct r535_gr *gr = r535_gr(base); + + while (gr->ctxbuf_nr) + nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]); + + kfree(gr->base.func); + return gr; +} + +int +r535_gr_new(const struct gf100_gr_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) +{ + struct nvkm_gr_func *rm; + struct r535_gr *gr; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_gr_dtor; + rm->oneinit = r535_gr_oneinit; + rm->units = r535_gr_units; + rm->chan_new = r535_gr_chan_new; + + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_gr_obj_ctor; + } + + if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + *pgr = &gr->base; + + return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c index a7775aa185..b7a458e904 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -22,6 +22,8 @@ #include "gf100.h" #include "ctxgf100.h" +#include <subdev/gsp.h> + #include <nvif/class.h> void @@ -216,5 +218,8 @@ tu102_gr_fwif[] = { int tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { + if (nvkm_gsp_rm(device->gsp)) + return r535_gr_new(&tu102_gr, device, type, inst, pgr); + return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild index f05e79670d..2b0e923cb7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild @@ -1,4 +1,9 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/nvdec/base.o nvkm-y += nvkm/engine/nvdec/gm107.o +nvkm-y += nvkm/engine/nvdec/tu102.o +nvkm-y += nvkm/engine/nvdec/ga100.o nvkm-y += nvkm/engine/nvdec/ga102.o +nvkm-y += nvkm/engine/nvdec/ad102.o + +nvkm-y += nvkm/engine/nvdec/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c new file mode 100644 index 0000000000..d72b3aae9a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. 
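r535_gr_oneinit() above does the heavy lifting for GSP-RM graphics: it builds a throwaway privileged channel, asks the RM which context buffers exist, allocates them (globals once here, per-channel ones later via r535_gr_promote_ctx()), and finally allocates a class whose number ends in 0x97 (the 3D class) to make the RM produce the golden context image. The page-size and alignment selection is worth spelling out: buffers of 2 MiB or more use 2 MiB pages, 64 KiB or more use 64 KiB pages, everything else 4 KiB; only the attribute CB is additionally aligned to order_base_2() of its size, and the MAIN buffer is padded by 64 extra pages for per-subcontext headers. A standalone restatement of the size-to-page-shift rule:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the selection in r535_gr_oneinit(): >= 2 MiB -> shift 21,
 * >= 64 KiB -> shift 16, otherwise 4 KiB pages (shift 12). */
static uint8_t
ctxbuf_page_shift(uint32_t size)
{
        if (size >= 1u << 21)
                return 21;
        if (size >= 1u << 16)
                return 16;

        return 12;
}

int
main(void)
{
        static const uint32_t sizes[] = { 0x1000, 0x20000, 0x400000 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size 0x%06x -> page shift %u\n",
                       sizes[i], ctxbuf_page_shift(sizes[i]));

        return 0;
}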
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ad102_nvdec = { + .sclass = { + { -1, -1, NVC9B0_VIDEO_DECODER }, + {} + } +}; + +int +ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvdec **pnvdec) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c index 1f6e3b32ba..7d1c6791ae 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c @@ -33,6 +33,7 @@ nvkm_nvdec_dtor(struct nvkm_engine *engine) static const struct nvkm_engine_func nvkm_nvdec = { .dtor = nvkm_nvdec_dtor, + .sclass = { {} }, }; int @@ -58,4 +59,4 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev, nvdec->engine.subdev.name, addr, &nvdec->falcon); -}; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c new file mode 100644 index 0000000000..932934227b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga100_nvdec = { + .sclass = { + { -1, -1, NVC6B0_VIDEO_DECODER }, + {} + } +}; + +int +ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvdec **pnvdec) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c index 37d8c3c0f3..022a9c8243 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c @@ -21,8 +21,17 @@ */ #include "priv.h" -#include <subdev/mc.h> -#include <subdev/timer.h> +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga102_nvdec_gsp = { + .sclass = { + { -1, -1, NVC7B0_VIDEO_DECODER }, + {} + } +}; static const struct nvkm_falcon_func ga102_nvdec_flcn = { @@ -57,5 +66,8 @@ int ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) { + if (nvkm_gsp_rm(device->gsp)) + return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec); + return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c index 564f7e8960..51c9d0e68e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c @@ -44,7 +44,7 @@ gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver, return 0; } -static const struct nvkm_nvdec_fwif +const struct nvkm_nvdec_fwif gm107_nvdec_fwif[] = { { -1, gm107_nvdec_nofw, &gm107_nvdec }, {} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h index 61e1f7aaa5..f506ae83bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h @@ -5,6 +5,8 @@ struct nvkm_nvdec_func { const struct nvkm_falcon_func *flcn; + + struct nvkm_sclass sclass[]; }; struct nvkm_nvdec_fwif { @@ -14,6 +16,11 @@ struct nvkm_nvdec_fwif { const struct nvkm_nvdec_func *func; }; +extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[]; + int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **); + +int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c new file mode 100644 index 0000000000..75a24f3e66 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Red Hat Inc. 
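A small but load-bearing detail in the nvdec diffs above: every class list is sentinel-terminated (iteration stops at .oclass == 0), which is why the otherwise empty base engine gains ".sclass = { {} }", and why the r535_*_new() constructors below can count classes with a bare "for (nclass = 0; hw->sclass[nclass].oclass; nclass++);". gm107_nvdec_fwif also loses its static qualifier so the new tu102 variant can fall back to it when GSP-RM is not in use. A tiny demonstration of the sentinel idiom:

#include <stdio.h>

struct sclass {
        int minver, maxver, oclass;
};

/* Counting stops at the zero-oclass terminator, exactly like the
 * nclass loops in the r535_*_new() constructors. */
static int
count_sclass(const struct sclass *sclass)
{
        int nclass;

        for (nclass = 0; sclass[nclass].oclass; nclass++)
                ;

        return nclass;
}

int
main(void)
{
        static const struct sclass list[] = {
                { -1, -1, 0xc4b0 },     /* e.g. a video decoder class */
                {}                      /* terminator */
        };

        printf("%d class(es)\n", count_sclass(list));
        return 0;
}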
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/object.h> +#include <subdev/gsp.h> +#include <engine/fifo.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> + +struct r535_nvdec_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvdec_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvdec_obj = { + .dtor = r535_nvdec_obj_dtor, +}; + +static int +r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvdec_obj *obj; + NV_BSP_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvdec_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); + + kfree(nvdec->engine.func); + return nvdec; +} + +int +r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) +{ + struct nvkm_engine_func *rm; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvdec_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvdec_obj_ctor; + } + + if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c new file mode 100644 index 0000000000..808c8e010b --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +tu102_nvdec = { + .sclass = { + { -1, -1, NVC4B0_VIDEO_DECODER }, + {} + } +}; + +int +tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvdec **pnvdec) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec); + + return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild index 75bf4436bf..2c1495b730 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild @@ -1,3 +1,8 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/nvenc/base.o nvkm-y += nvkm/engine/nvenc/gm107.o +nvkm-y += nvkm/engine/nvenc/tu102.o +nvkm-y += nvkm/engine/nvenc/ga102.o +nvkm-y += nvkm/engine/nvenc/ad102.o + +nvkm-y += nvkm/engine/nvenc/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c new file mode 100644 index 0000000000..1b4619ff9e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
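r535_nvdec_obj_ctor() above — and its nvenc, nvjpg and ofa twins below — all follow the same RM allocation idiom: resolve the channel from the parent object via nvkm_uchan_chan(), request a parameter buffer for the class allocation, fill in the struct size and engine instance, then commit it. Only the parameter type differs per engine (NV_BSP_, NV_MSENC_, NV_NVJPG_ and NV_OFA_ALLOCATION_PARAMETERS; the OFA one carries no engine instance). A userspace sketch of the get/fill/commit shape, with illustrative names only:

#include <stdio.h>
#include <stdlib.h>

struct alloc_params {
        unsigned int size;              /* RM wants the struct size echoed */
        unsigned int engine_instance;
};

static struct alloc_params *
rpc_alloc_get(void)
{
        return calloc(1, sizeof(struct alloc_params));
}

static int
rpc_alloc_wr(struct alloc_params *args)
{
        printf("alloc: size=%u instance=%u\n", args->size,
               args->engine_instance);
        free(args);     /* committing consumes the parameter buffer */
        return 0;
}

int
main(void)
{
        struct alloc_params *args = rpc_alloc_get();

        if (!args)
                return 1;

        args->size = sizeof(*args);
        args->engine_instance = 0;
        return rpc_alloc_wr(args);
}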
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ad102_nvenc = { + .sclass = { + { -1, -1, NVC9B7_VIDEO_ENCODER }, + {} + } +}; + +int +ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvenc **pnvenc) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c index cf5dcfda7b..d45dbb42a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c @@ -34,6 +34,7 @@ nvkm_nvenc_dtor(struct nvkm_engine *engine) static const struct nvkm_engine_func nvkm_nvenc = { .dtor = nvkm_nvenc_dtor, + .sclass = { {} }, }; int @@ -59,4 +60,4 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev, nvenc->engine.subdev.name, 0, &nvenc->falcon); -}; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c new file mode 100644 index 0000000000..6463ab8e58 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga102_nvenc = { + .sclass = { + { -1, -1, NVC7B7_VIDEO_ENCODER }, + {} + } +}; + +int +ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvenc **pnvenc) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c index ad27d8b975..922abb647a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c @@ -38,7 +38,7 @@ gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver, return 0; } -static const struct nvkm_nvenc_fwif +const struct nvkm_nvenc_fwif gm107_nvenc_fwif[] = { { -1, gm107_nvenc_nofw, &gm107_nvenc }, {} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h index 4130a2bfbb..7917affc65 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -14,6 +14,11 @@ struct nvkm_nvenc_fwif { const struct nvkm_nvenc_func *func; }; +extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[]; + int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_nvenc **pnvenc); + +int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c new file mode 100644 index 0000000000..c8a2a9196c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/object.h> +#include <subdev/gsp.h> +#include <engine/fifo.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> + +struct r535_nvenc_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvenc_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvenc_obj = { + .dtor = r535_nvenc_obj_dtor, +}; + +static int +r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvenc_obj *obj; + NV_MSENC_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvenc_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); + + kfree(nvenc->engine.func); + return nvenc; +} + +int +r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) +{ + struct nvkm_engine_func *rm; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvenc_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvenc_obj_ctor; + } + + if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c new file mode 100644 index 0000000000..933864423b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +tu102_nvenc = { + .sclass = { + { -1, -1, NVC4B7_VIDEO_ENCODER }, + {} + } +}; + +int +tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_nvenc **pnvenc) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc); + + return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild new file mode 100644 index 0000000000..1408f664ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/engine/nvjpg/ga100.o +nvkm-y += nvkm/engine/nvjpg/ad102.o + +nvkm-y += nvkm/engine/nvjpg/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c new file mode 100644 index 0000000000..62705dc649 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ad102_nvjpg = { + .sclass = { + { -1, -1, NVC9D1_VIDEO_NVJPG }, + {} + } +}; + +int +ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c new file mode 100644 index 0000000000..f550eb07da --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga100_nvjpg = { + .sclass = { + { -1, -1, NVC4D1_VIDEO_NVJPG }, + {} + } +}; + +int +ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h new file mode 100644 index 0000000000..1e80cf7003 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_NVJPG_PRIV_H__ +#define __NVKM_NVJPG_PRIV_H__ +#include <engine/nvjpg.h> + +int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_engine **); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c new file mode 100644 index 0000000000..1babddc4eb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c @@ -0,0 +1,107 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/object.h> +#include <subdev/gsp.h> +#include <engine/fifo.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> + +struct r535_nvjpg_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvjpg_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvjpg_obj = { + .dtor = r535_nvjpg_obj_dtor, +}; + +static int +r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvjpg_obj *obj; + NV_NVJPG_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvjpg_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) +{ + struct nvkm_engine_func *rm; + int nclass, ret; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvjpg_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvjpg_obj_ctor; + } + + ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild new file mode 100644 index 0000000000..99f1713d7e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/engine/ofa/ga100.o +nvkm-y += nvkm/engine/ofa/ga102.o +nvkm-y += nvkm/engine/ofa/ad102.o + +nvkm-y += nvkm/engine/ofa/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c new file mode 100644 index 0000000000..7ac87ef26a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
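One subtlety in these constructors is ownership of the heap-allocated function table: once nvkm_engine_new_() (or nvkm_engine_ctor()) succeeds, the table belongs to the engine and the r535_*_dtor() hooks free it through engine->func; on failure it was never adopted, so the caller must kfree() it itself, as r535_nvjpg_new() does above and r535_ofa_new() repeats below. A compact sketch of that rule, assuming nothing beyond standard C:

#include <stdlib.h>

struct engine;

struct ops {
        void (*dtor)(struct engine *);
};

struct engine {
        const struct ops *func;
};

static void
engine_dtor(struct engine *engine)
{
        free((void *)engine->func);     /* on success, the engine owns it */
        free(engine);
}

static int
engine_ctor(const struct ops *ops, struct engine **pengine)
{
        *pengine = calloc(1, sizeof(**pengine));
        if (!*pengine)
                return -1;

        (*pengine)->func = ops;
        return 0;
}

int
main(void)
{
        struct ops *rm = calloc(1, sizeof(*rm));
        struct engine *engine;

        if (!rm)
                return 1;

        rm->dtor = engine_dtor;

        if (engine_ctor(rm, &engine)) {
                free(rm);       /* never adopted: free it here */
                return 1;
        }

        engine->func->dtor(engine);
        return 0;
}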
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ad102_ofa = { + .sclass = { + { -1, -1, NVC9FA_VIDEO_OFA }, + {} + } +}; + +int +ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_ofa_new(&ad102_ofa, device, type, inst, pengine); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c new file mode 100644 index 0000000000..ef474f61a1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga100_ofa = { + .sclass = { + { -1, -1, NVC6FA_VIDEO_OFA }, + {} + } +}; + +int +ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_ofa_new(&ga100_ofa, device, type, inst, pengine); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c new file mode 100644 index 0000000000..bea2555299 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +ga102_ofa = { + .sclass = { + { -1, -1, NVC7FA_VIDEO_OFA }, + {} + } +}; + +int +ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_ofa_new(&ga102_ofa, device, type, inst, pengine); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h new file mode 100644 index 0000000000..caf29e6bdd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_OFA_PRIV_H__ +#define __NVKM_OFA_PRIV_H__ +#include <engine/ofa.h> + +int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_engine **); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c new file mode 100644 index 0000000000..438dc692ee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c @@ -0,0 +1,107 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/object.h> +#include <subdev/gsp.h> +#include <subdev/mmu.h> +#include <engine/fifo.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> + +struct r535_ofa_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_ofa_obj_dtor(struct nvkm_object *object) +{ + struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_ofa_obj = { + .dtor = r535_ofa_obj_dtor, +}; + +static int +r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_ofa_obj *obj; + NV_OFA_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_ofa_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) +{ + struct nvkm_engine_func *rm; + int nclass, ret; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_ofa_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_ofa_obj_ctor; + } + + ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c index 8fe0444f76..131db2645f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c @@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon, args->v0.id = di; args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom); - strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1); + strscpy(args->v0.name, dom->name, sizeof(args->v0.name)); /* Currently only global counters (PCOUNTER) are implemented * but this will be different for local counters (MP). 
*/ @@ -513,8 +513,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon, snprintf(args->v0.name, sizeof(args->v0.name), "/%s/%02x", dom->name, si); } else { - strncpy(args->v0.name, sig->name, - sizeof(args->v0.name) - 1); + strscpy(args->v0.name, sig->name, sizeof(args->v0.name)); } args->v0.signal = si; @@ -572,7 +571,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon, args->v0.source = sig->source[si]; args->v0.mask = src->mask; - strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1); + strscpy(args->v0.name, src->name, sizeof(args->v0.name)); } if (++si < source_nr) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h index 6ae25d3e7f..c011227f70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h @@ -82,7 +82,7 @@ struct nvkm_perfdom { u8 mode; u32 clk; u16 signal_nr; - struct nvkm_perfsig signal[]; + struct nvkm_perfsig signal[] __counted_by(signal_nr); }; struct nvkm_funcdom { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild index 19feadb1f6..b43b7e5e27 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild @@ -4,3 +4,5 @@ nvkm-y += nvkm/engine/sec2/gp102.o nvkm-y += nvkm/engine/sec2/gp108.o nvkm-y += nvkm/engine/sec2/tu102.o nvkm-y += nvkm/engine/sec2/ga102.o + +nvkm-y += nvkm/engine/sec2/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c index 945abb8156..54be7596b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c @@ -21,6 +21,7 @@ */ #include "priv.h" #include <subdev/acr.h> +#include <subdev/gsp.h> #include <subdev/vfn.h> #include <nvfw/flcn.h> @@ -193,5 +194,10 @@ ga102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, /* TOP info wasn't updated on Turing to reflect the PRI * address change for some reason. We override it here. */ - return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, 0x840000, psec2); + const u32 addr = 0x840000; + + if (nvkm_gsp_rm(device->gsp)) + return r535_sec2_new(&ga102_sec2, device, type, inst, addr, psec2); + + return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, addr, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h index 172d2705c1..e158a40a4f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h @@ -4,6 +4,9 @@ #include <engine/sec2.h> struct nvkm_acr_lsfw; +int r535_sec2_new(const struct nvkm_sec2_func *, + struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_sec2 **); + struct nvkm_sec2_func { const struct nvkm_falcon_func *flcn; u8 unit_unload; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c new file mode 100644 index 0000000000..83a6bad596 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c @@ -0,0 +1,54 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static void * +r535_sec2_dtor(struct nvkm_engine *engine) +{ + struct nvkm_sec2 *sec2 = nvkm_sec2(engine); + + nvkm_falcon_dtor(&sec2->falcon); + return sec2; +} + +static const struct nvkm_engine_func +r535_sec2 = { + .dtor = r535_sec2_dtor, +}; + +int +r535_sec2_new(const struct nvkm_sec2_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2) +{ + struct nvkm_sec2 *sec2; + int ret; + + if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL))) + return -ENOMEM; + + ret = nvkm_engine_ctor(&r535_sec2, device, type, inst, true, &sec2->engine); + if (ret) + return ret; + + return nvkm_falcon_ctor(func->flcn, &sec2->engine.subdev, sec2->engine.subdev.name, + addr, &sec2->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c index 0afc4b2fa5..20452046d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c @@ -21,6 +21,7 @@ */ #include "priv.h" #include <subdev/acr.h> +#include <subdev/gsp.h> #include <nvfw/sec2.h> @@ -82,5 +83,10 @@ tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, /* TOP info wasn't updated on Turing to reflect the PRI * address change for some reason. We override it here. 
*/ - return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2); + const u32 addr = 0x840000; + + if (nvkm_gsp_rm(device->gsp)) + return r535_sec2_new(&tu102_sec2, device, type, inst, addr, psec2); + + return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, addr, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild index 9ffe7b921c..d6b0155644 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild @@ -8,5 +8,6 @@ nvkm-y += nvkm/falcon/v1.o nvkm-y += nvkm/falcon/gm200.o nvkm-y += nvkm/falcon/gp102.o +nvkm-y += nvkm/falcon/tu102.o nvkm-y += nvkm/falcon/ga100.o nvkm-y += nvkm/falcon/ga102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 235149f73a..3b790865ae 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -25,6 +25,22 @@ #include <subdev/timer.h> #include <subdev/top.h> +void +nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon) +{ + if (falcon->func->intr_retrigger) + falcon->func->intr_retrigger(falcon); +} + +bool +nvkm_falcon_riscv_active(struct nvkm_falcon *falcon) +{ + if (!falcon->func->riscv_active) + return false; + + return falcon->func->riscv_active(falcon); +} + static const struct nvkm_falcon_func_dma * nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c index 49fd329439..5db94fb10a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c @@ -21,6 +21,12 @@ */ #include "priv.h" +void +ga100_flcn_intr_retrigger(struct nvkm_falcon *falcon) +{ + nvkm_falcon_wr32(falcon, 0x3e8, 0x00000001); +} + int ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c index 0ff450fe35..834afa45f2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c @@ -24,6 +24,12 @@ #include <subdev/mc.h> #include <subdev/timer.h> +bool +ga102_flcn_riscv_active(struct nvkm_falcon *falcon) +{ + return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0; +} + static bool ga102_flcn_dma_done(struct nvkm_falcon *falcon) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c new file mode 100644 index 0000000000..3999182194 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +bool +tu102_flcn_riscv_active(struct nvkm_falcon *falcon) +{ + return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x240) & 0x00000001) != 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c index 45dcf493e9..c7d38609bb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include "priv.h" +#include <subdev/gsp.h> #include <nvfw/acr.h> @@ -322,5 +323,8 @@ int ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c index c22d551c00..565e9a070b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -201,5 +201,8 @@ int tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild index 8faee3317a..9754c68725 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild @@ -7,3 +7,5 @@ nvkm-y += nvkm/subdev/bar/gk20a.o nvkm-y += nvkm/subdev/bar/gm107.o nvkm-y += nvkm/subdev/bar/gm20b.o nvkm-y += nvkm/subdev/bar/tu102.o + +nvkm-y += nvkm/subdev/bar/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c index d017a1b5e5..91bc53be97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c @@ -93,8 +93,16 @@ static int nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_bar *bar = nvkm_bar(subdev); + + if (!subdev->use.enabled) + return 0; + if (bar->func->bar1.fini) bar->func->bar1.fini(bar); + + if (!suspend) /* Handled by instmem. 
*/ + nvkm_bar_bar2_fini(subdev->device); + return 0; } @@ -120,7 +128,7 @@ static void * nvkm_bar_dtor(struct nvkm_subdev *subdev) { struct nvkm_bar *bar = nvkm_bar(subdev); - nvkm_bar_bar2_fini(subdev->device); + return bar->func->dtor(bar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h index daebfc991c..d0168e0b78 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h @@ -4,6 +4,9 @@ #define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev) #include <subdev/bar.h> +int r535_bar_new_(const struct nvkm_bar_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); + void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bar *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c new file mode 100644 index 0000000000..3a30bea30e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -0,0 +1,185 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" + +#include <core/mm.h> +#include <subdev/fb.h> +#include <subdev/gsp.h> +#include <subdev/instmem.h> +#include <subdev/mmu/vmm.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> +#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> +#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h> + +static void +r535_bar_flush(struct nvkm_bar *bar) +{ + ioread32_native(bar->flushBAR2); +} + +static void +r535_bar_bar2_wait(struct nvkm_bar *base) +{ +} + +static int +r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) +{ + rpc_update_bar_pde_v15_00 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; + rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ + rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! 
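	/*
	 * [Editor's note - illustrative sketch, not part of the patch itself.]
	 * The UPDATE_BAR_PDE RPC appears to take a fully-encoded top-level PDE
	 * rather than a bare address: the page-directory address is packed into
	 * the upper bits with the low bits used as flags (the "| 2" above), and
	 * entryLevelShift = 47 matches the PD3 level of a 5-level page table.
	 * Assuming that encoding, a page directory at 0x1234567000 would be
	 * sent as:
	 *
	 *	u64 pd_addr = 0x1234567000ULL;
	 *	u64 pde = (pd_addr >> 4) | 2;	// yields 0x123456702
	 */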
+ + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +static void +r535_bar_bar2_fini(struct nvkm_bar *bar) +{ + struct nvkm_gsp *gsp = bar->subdev.device->gsp; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + nvkm_done(bar->flushFBZero); + + WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); +} + +static void +r535_bar_bar2_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; + struct nvkm_gsp *gsp = device->gsp; + + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); + vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; + + if (!bar->flushFBZero) { + struct nvkm_memory *fbZero; + int ret; + + ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero); + if (ret == 0) { + ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero); + nvkm_memory_unref(&fbZero); + } + WARN_ON(ret); + } + + bar->bar2 = true; + bar->flushBAR2 = nvkm_kmap(bar->flushFBZero); + WARN_ON(!bar->flushBAR2); +} + +static void +r535_bar_bar1_wait(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_fini(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm; + struct nvkm_memory *pd3; + int ret; + + ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3); + if (WARN_ON(ret)) + return; + + nvkm_memory_unref(&vmm->pd->pt[0]->memory); + + ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory); + nvkm_memory_unref(&pd3); + if (WARN_ON(ret)) + return; + + vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory); +} + +static void * +r535_bar_dtor(struct nvkm_bar *bar) +{ + void *data = gf100_bar_dtor(bar); + + nvkm_memory_unref(&bar->flushFBZero); + + if (bar->flushBAR2PhysMode) + iounmap(bar->flushBAR2PhysMode); + + kfree(bar->func); + return data; +} + +int +r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) +{ + struct nvkm_bar_func *rm; + struct nvkm_bar *bar; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_bar_dtor; + rm->oneinit = hw->oneinit; + rm->bar1.init = r535_bar_bar1_init; + rm->bar1.fini = r535_bar_bar1_fini; + rm->bar1.wait = r535_bar_bar1_wait; + rm->bar1.vmm = hw->bar1.vmm; + rm->bar2.init = r535_bar_bar2_init; + rm->bar2.fini = r535_bar_bar2_fini; + rm->bar2.wait = r535_bar_bar2_wait; + rm->bar2.vmm = hw->bar2.vmm; + rm->flush = r535_bar_flush; + + ret = gf100_bar_new_(rm, device, type, inst, &bar); + if (ret) { + kfree(rm); + return ret; + } + *pbar = bar; + + bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); + if (!bar->flushBAR2PhysMode) + return -ENOMEM; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + + gf100_bar(*pbar)->bar2_halve = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c index c25ab407b8..b4196edad5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c @@ -22,6 +22,7 @@ #include "gf100.h" #include <core/memory.h> +#include <subdev/gsp.h> #include <subdev/timer.h> static void @@ -95,5 +96,8 @@ int tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) { + if (nvkm_gsp_rm(device->gsp)) + return r535_bar_new_(&tu102_bar, device, type, inst, pbar); + return gf100_bar_new_(&tu102_bar, device, 
type, inst, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index 6c318e41bd..91f486ee4c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) return true; } +void * +nvbios_pointer(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 0))) + return &bios->data[addr]; + return NULL; +} + u8 nvbios_rd08(struct nvkm_bios *bios, u32 addr) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 19188683c8..8c2bf1c16f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name) return (void *)fw; } +static void +shadow_fw_release(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", .init = shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_release, .read = shadow_fw_read, .rw = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c index 80b5aaceea..8e1e0b057a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c @@ -24,6 +24,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static void gf100_bus_intr(struct nvkm_bus *bus) { @@ -72,5 +74,8 @@ int gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild index d1abb64841..5f97bffca9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild @@ -16,3 +16,5 @@ nvkm-y += nvkm/subdev/devinit/gm200.o nvkm-y += nvkm/subdev/devinit/gv100.o nvkm-y += nvkm/subdev/devinit/tu102.o nvkm-y += nvkm/subdev/devinit/ga100.o + +nvkm-y += nvkm/subdev/devinit/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c index 6b280b05c4..5f0b12a1fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c @@ -24,6 +24,7 @@ #include <subdev/bios.h> #include <subdev/bios/pll.h> #include <subdev/clk/pll.h> +#include <subdev/gsp.h> static int ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) @@ -62,8 +63,19 @@ ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) return ret; } +static void +ga100_devinit_disable(struct nvkm_devinit *init) +{ + struct nvkm_device *device = init->subdev.device; + u32 r820c04 = nvkm_rd32(device, 0x820c04); + + if (r820c04 & 0x00000001) + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); +} + static const struct nvkm_devinit_func ga100_devinit = { + .disable = ga100_devinit_disable, .init = nv50_devinit_init, .post = tu102_devinit_post, .pll_set = ga100_devinit_pll_set, @@ -73,5 +85,8 @@ int ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { + if (nvkm_gsp_rm(device->gsp)) + return r535_devinit_new(&ga100_devinit, device, type, inst, pinit); + 
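	/*
	 * [Editor's note - illustrative sketch, not part of the patch itself.]
	 * ga100_devinit_new() above shows the dispatch shape repeated across
	 * this series: probe for GSP-RM firmware first, and fall back to
	 * driving the hardware directly otherwise.  With <subdev> standing in
	 * as a placeholder for any of the engines/subdevs touched here, the
	 * pattern is roughly:
	 *
	 *	if (nvkm_gsp_rm(device->gsp))
	 *		return r535_<subdev>_new(&hw_funcs, device, type, inst, pobj);
	 *	return hw_ctor(&hw_funcs, device, type, inst, pobj);
	 *
	 * where the r535 constructor clones the hardware function table, swaps
	 * RM-backed entry points into the clone, and kfree()s it in its dtor.
	 */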
return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h index a648482d06..06bbfdcc78 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h @@ -4,6 +4,9 @@ #define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev) #include <subdev/devinit.h> +int r535_devinit_new(const struct nvkm_devinit_func *, + struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); + struct nvkm_devinit_func { void *(*dtor)(struct nvkm_devinit *); void (*preinit)(struct nvkm_devinit *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c new file mode 100644 index 0000000000..666eb93b17 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c @@ -0,0 +1,51 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "nv50.h" + +static void * +r535_devinit_dtor(struct nvkm_devinit *devinit) +{ + kfree(devinit->func); + return devinit; +} + +int +r535_devinit_new(const struct nvkm_devinit_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pdevinit) +{ + struct nvkm_devinit_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_devinit_dtor; + rm->post = hw->post; + rm->disable = hw->disable; + + ret = nv50_devinit_new_(rm, device, type, inst, pdevinit); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c index 81a1ad2c88..f406b1525a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c @@ -24,6 +24,7 @@ #include <subdev/bios.h> #include <subdev/bios/pll.h> #include <subdev/clk/pll.h> +#include <subdev/gsp.h> static int tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) @@ -83,17 +84,9 @@ tu102_devinit_wait(struct nvkm_device *device) } int -tu102_devinit_post(struct nvkm_devinit *base, bool post) +tu102_devinit_post(struct nvkm_devinit *init, bool post) { - struct nv50_devinit *init = nv50_devinit(base); - int ret; - - ret = tu102_devinit_wait(init->base.subdev.device); - if (ret) - return ret; - - gm200_devinit_preos(init, post); - return 0; + return tu102_devinit_wait(init->subdev.device); } static const struct nvkm_devinit_func @@ -108,5 +101,8 @@ int tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { + if (nvkm_gsp_rm(device->gsp)) + return r535_devinit_new(&tu102_devinit, device, type, inst, pinit); + return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index 967efaddae..5390417a58 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -22,6 +22,7 @@ #include "priv.h" #include <core/memory.h> +#include <subdev/gsp.h> #include <subdev/mc.h> #include <subdev/mmu.h> #include <subdev/vfn.h> @@ -175,7 +176,12 @@ int tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault); + int ret; + + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + + ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index 394c305e75..d1611ad3bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -36,6 +36,8 @@ nvkm-y += nvkm/subdev/fb/tu102.o nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o +nvkm-y += nvkm/subdev/fb/r535.o + nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o nvkm-y += nvkm/subdev/fb/ramnv10.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c index 12037fd4fd..e9e7c1d5c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c @@ -22,6 +22,8 @@ #include "gf100.h" #include "ram.h" +#include <subdev/gsp.h> + static const struct nvkm_fb_func ga100_fb = { .dtor = gf100_fb_dtor, @@ 
-38,5 +40,8 @@ ga100_fb = { int ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&ga100_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index 76f6877b54..25f82b372b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -22,6 +22,7 @@ #include "gf100.h" #include "ram.h" +#include <subdev/gsp.h> #include <engine/nvdec.h> static u64 @@ -59,6 +60,9 @@ ga102_fb = { int ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&ga102_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga102_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 77d6a8c108..35c55dfba2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -6,6 +6,9 @@ #include <subdev/therm.h> struct nvkm_bios; +int r535_fb_new(const struct nvkm_fb_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); + struct nvkm_fb_func { void *(*dtor)(struct nvkm_fb *); u32 (*tags)(struct nvkm_fb *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c new file mode 100644 index 0000000000..d325150101 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c @@ -0,0 +1,87 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include "ram.h" + +#include <subdev/gsp.h> + +static const struct nvkm_ram_func +r535_fb_ram = { +}; + +static int +r535_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) +{ + struct nvkm_gsp *gsp = fb->subdev.device->gsp; + struct nvkm_ram *ram; + int ret; + + if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL))) + return -ENOMEM; + + ram->func = &r535_fb_ram; + ram->fb = fb; + ram->type = NVKM_RAM_TYPE_UNKNOWN; /*TODO: pull this from GSP. 
*/ + ram->size = gsp->fb.size; + ram->stolen = false; + mutex_init(&ram->mutex); + + for (int i = 0; i < gsp->fb.region_nr; i++) { + ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, + gsp->fb.region[i].addr >> NVKM_RAM_MM_SHIFT, + gsp->fb.region[i].size >> NVKM_RAM_MM_SHIFT, + 1); + if (ret) + return ret; + } + + return 0; +} + +static void * +r535_fb_dtor(struct nvkm_fb *fb) +{ + kfree(fb->func); + return fb; +} + +int +r535_fb_new(const struct nvkm_fb_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + struct nvkm_fb_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_fb_dtor; + rm->sysmem.flush_page_init = hw->sysmem.flush_page_init; + rm->vidmem.size = hw->vidmem.size; + rm->ram_new = r535_fb_ram_new; + + ret = nvkm_fb_new_(rm, device, type, inst, pfb); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c index 5c34416cb6..c826980bf7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c @@ -88,12 +88,20 @@ nvkm_vram_dtor(struct nvkm_memory *memory) struct nvkm_vram *vram = nvkm_vram(memory); struct nvkm_mm_node *next = vram->mn; struct nvkm_mm_node *node; - mutex_lock(&vram->ram->mutex); - while ((node = next)) { - next = node->next; - nvkm_mm_free(&vram->ram->vram, &node); + + if (next) { + if (likely(next->nl_entry.next)) { + mutex_lock(&vram->ram->mutex); + while ((node = next)) { + next = node->next; + nvkm_mm_free(&vram->ram->vram, &node); + } + mutex_unlock(&vram->ram->mutex); + } else { + kfree(vram->mn); + } } - mutex_unlock(&vram->ram->mutex); + + return vram; } @@ -109,6 +117,33 @@ nvkm_vram = { }; int +nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size, + struct nvkm_memory **pmemory) +{ + struct nvkm_ram *ram; + struct nvkm_vram *vram; + + if (!device->fb || !(ram = device->fb->ram)) + return -ENODEV; + + if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL))) + return -ENOMEM; + + nvkm_memory_ctor(&nvkm_vram, &vram->memory); + vram->ram = ram; + vram->page = NVKM_RAM_MM_SHIFT; + *pmemory = &vram->memory; + + vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL); + if (!vram->mn) + return -ENOMEM; + + vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT; + vram->mn->length = size >> NVKM_RAM_MM_SHIFT; + return 0; +} + +int nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, bool contig, bool back, struct nvkm_memory **pmemory) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c index bcc23d4c81..f7d2a749ce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c @@ -22,6 +22,8 @@ #include "gf100.h" #include "ram.h" +#include <subdev/gsp.h> + bool tu102_fb_vpr_scrub_required(struct nvkm_fb *fb) { @@ -46,6 +48,9 @@ tu102_fb = { int tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&tu102_fb, device, type, inst, pfb); + return gf100_fb_new_(&tu102_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c index 7dc99492f5..d621edbdff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c @@ -23,6 +23,8 @@ */ #include
"priv.h" +#include <subdev/gsp.h> + static u32 gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr) { @@ -39,5 +41,8 @@ int gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c index 4a96f926b6..4dbffae21d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static void ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match) { @@ -115,5 +117,8 @@ int ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c index c0e4cdb455..5f7063d557 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static void gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) { @@ -71,5 +73,8 @@ int gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 7f61a1ed15..16bf2f1bb7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -1,4 +1,12 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/subdev/gsp/base.o +nvkm-y += nvkm/subdev/gsp/fwsec.o + nvkm-y += nvkm/subdev/gsp/gv100.o +nvkm-y += nvkm/subdev/gsp/tu102.o +nvkm-y += nvkm/subdev/gsp/tu116.o +nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o +nvkm-y += nvkm/subdev/gsp/ad102.o + +nvkm-y += nvkm/subdev/gsp/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c new file mode 100644 index 0000000000..c849c6299c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -0,0 +1,57 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static const struct nvkm_gsp_func +ad102_gsp_r535_113_01 = { + .flcn = &ga102_gsp_flcn, + .fwsec = &ga102_gsp_fwsec, + + .sig_section = ".fwsignature_ad10x", + + .wpr_heap.os_carveout_size = 20 << 20, + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 84 << 20, + + .booter.ctor = ga102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = ga102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +ad102_gsps[] = { + { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, + {} +}; + +int +ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 591ac95c26..da1bebb896 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -20,15 +20,74 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include "priv.h" -#include <core/falcon.h> -#include <core/firmware.h> -#include <subdev/acr.h> -#include <subdev/top.h> + +int +nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) +{ + for (int i = 0; i < gsp->intr_nr; i++) { + if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) + return gsp->intr[i].nonstall; + } + + return -ENOENT; +} + +int +nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) +{ + for (int i = 0; i < gsp->intr_nr; i++) { + if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) { + if (gsp->intr[i].stall != ~0) + return gsp->intr[i].stall; + + return -EINVAL; + } + } + + return -ENOENT; +} + +static int +nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->fini) + return 0; + + return gsp->func->fini(gsp, suspend); +} + +static int +nvkm_gsp_init(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->init) + return 0; + + return gsp->func->init(gsp); +} + +static int +nvkm_gsp_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->oneinit) + return 0; + + return gsp->func->oneinit(gsp); +} static void * nvkm_gsp_dtor(struct nvkm_subdev *subdev) { struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (gsp->func && gsp->func->dtor) + gsp->func->dtor(gsp); + nvkm_falcon_dtor(&gsp->falcon); return gsp; } @@ -36,6 +95,9 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev) static const struct nvkm_subdev_func nvkm_gsp = { .dtor = nvkm_gsp_dtor, + .oneinit = nvkm_gsp_oneinit, + .init = nvkm_gsp_init, + .fini = nvkm_gsp_fini, }; int @@ -54,6 +116,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, return PTR_ERR(fwif); gsp->func = fwif->func; + gsp->rm = gsp->func->rm; - return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon); + return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000, + &gsp->falcon); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c 
new file mode 100644 index 0000000000..330d72b1a4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c @@ -0,0 +1,359 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/bios.h> +#include <subdev/bios/pmu.h> + +#include <nvfw/fw.h> + +union nvfw_falcon_appif_hdr { + struct nvfw_falcon_appif_hdr_v1 { + u8 ver; + u8 hdr; + u8 len; + u8 cnt; + } v1; +}; + +union nvfw_falcon_appif { + struct nvfw_falcon_appif_v1 { +#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004 + u32 id; + u32 dmem_base; + } v1; +}; + +union nvfw_falcon_appif_dmemmapper { + struct { + u32 signature; + u16 version; + u16 size; + u32 cmd_in_buffer_offset; + u32 cmd_in_buffer_size; + u32 cmd_out_buffer_offset; + u32 cmd_out_buffer_size; + u32 nvf_img_data_buffer_offset; + u32 nvf_img_data_buffer_size; + u32 printf_buffer_hdr; + u32 ucode_build_time_stamp; + u32 ucode_signature; +#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015 +#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB 0x00000019 + u32 init_cmd; + u32 ucode_feature; + u32 ucode_cmd_mask0; + u32 ucode_cmd_mask1; + u32 multi_tgt_tbl; + } v3; +}; + +struct nvfw_fwsec_frts_cmd { + struct { + u32 ver; + u32 hdr; + u64 addr; + u32 size; + u32 flags; + } read_vbios; + struct { + u32 ver; + u32 hdr; + u32 addr; + u32 size; +#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002 + u32 type; + } frts_region; +}; + +static int +nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd) +{ + union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset); + const u8 *dmem = fw->fw.img + fw->dmem_base_img; + int i; + + if (WARN_ON(hdr->v1.ver != 1)) + return -EINVAL; + + for (i = 0; i < hdr->v1.cnt; i++) { + union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len); + union nvfw_falcon_appif_dmemmapper *dmemmap; + struct nvfw_fwsec_frts_cmd *frtscmd; + + if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER) + continue; + + dmemmap = (void *)(dmem + app->v1.dmem_base); + dmemmap->v3.init_cmd = init_cmd; + + frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset); + + frtscmd->read_vbios.ver = 1; + frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios); + frtscmd->read_vbios.addr = 0; + frtscmd->read_vbios.size = 0; + frtscmd->read_vbios.flags = 2; + + if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) { + frtscmd->frts_region.ver = 1; + frtscmd->frts_region.hdr = 
sizeof(frtscmd->frts_region); + frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12; + frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12; + frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB; + } + + break; + } + + if (WARN_ON(i == hdr->v1.cnt)) + return -EINVAL; + + return 0; +} + +union nvfw_falcon_ucode_desc { + struct nvkm_falcon_ucode_desc_v2 { + u32 Hdr; + u32 StoredSize; + u32 UncompressedSize; + u32 VirtualEntry; + u32 InterfaceOffset; + u32 IMEMPhysBase; + u32 IMEMLoadSize; + u32 IMEMVirtBase; + u32 IMEMSecBase; + u32 IMEMSecSize; + u32 DMEMOffset; + u32 DMEMPhysBase; + u32 DMEMLoadSize; + u32 altIMEMLoadSize; + u32 altDMEMLoadSize; + } v2; + + struct nvkm_falcon_ucode_desc_v3 { + u32 Hdr; + u32 StoredSize; + u32 PKCDataOffset; + u32 InterfaceOffset; + u32 IMEMPhysBase; + u32 IMEMLoadSize; + u32 IMEMVirtBase; + u32 DMEMPhysBase; + u32 DMEMLoadSize; + u16 EngineIdMask; + u8 UcodeId; + u8 SignatureCount; + u16 SignatureVersions; + u16 Reserved; + } v3; +}; + +static int +nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name, + const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd, + struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + const struct firmware *bl; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *bld; + int ret; + + /* Build ucode. */ + ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true, + (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, + &gsp->falcon, fw); + if (WARN_ON(ret)) + return ret; + + fw->nmem_base_img = 0; + fw->nmem_base = desc->IMEMPhysBase; + fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize; + + fw->imem_base_img = 0; + fw->imem_base = desc->IMEMSecBase; + fw->imem_size = desc->IMEMSecSize; + + fw->dmem_base_img = desc->DMEMOffset; + fw->dmem_base = desc->DMEMPhysBase; + fw->dmem_size = desc->DMEMLoadSize; + + /* Bootloader. */ + ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, bl->data); + bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); + + fw->boot_addr = bld->start_tag << 8; + fw->boot_size = bld->code_size; + fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL); + if (!fw->boot) + ret = -ENOMEM; + + nvkm_firmware_put(bl); + + /* Patch in interface data. */ + return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); +} + +static int +nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name, + const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd, + struct nvkm_falcon_fw *fw) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_bios *bios = device->bios; + int ret; + + /* Build ucode. */ + ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true, + (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, + &gsp->falcon, fw); + if (WARN_ON(ret)) + return ret; + + fw->imem_base_img = 0; + fw->imem_base = desc->IMEMPhysBase; + fw->imem_size = desc->IMEMLoadSize; + fw->dmem_base_img = desc->IMEMLoadSize; + fw->dmem_base = desc->DMEMPhysBase; + fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256); + fw->dmem_sign = desc->PKCDataOffset; + fw->boot_addr = 0; + fw->fuse_ver = desc->SignatureVersions; + fw->ucode_id = desc->UcodeId; + fw->engine_id = desc->EngineIdMask; + + /* Patch in signature. 
*/ + ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4, + nvbios_pointer(bios, 0), desc->SignatureCount, + (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0); + if (WARN_ON(ret)) + return ret; + + /* Patch in interface data. */ + return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); +} + +static int +nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_bios *bios = device->bios; + const union nvfw_falcon_ucode_desc *desc; + struct nvbios_pmuE flcn_ucode; + u8 idx, ver, hdr; + u32 data; + u16 size, vers; + struct nvkm_falcon_fw fw = {}; + u32 mbox0 = 0; + int ret; + + /* Lookup in VBIOS. */ + for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) { + if (flcn_ucode.type == 0x85) + break; + } + + if (WARN_ON(!data)) + return -EINVAL; + + /* Determine version. */ + desc = nvbios_pointer(bios, flcn_ucode.data); + if (WARN_ON(!(desc->v2.Hdr & 0x00000001))) + return -EINVAL; + + size = (desc->v2.Hdr & 0xffff0000) >> 16; + vers = (desc->v2.Hdr & 0x0000ff00) >> 8; + + switch (vers) { + case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break; + case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break; + default: + nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers); + return -EINVAL; + } + + if (ret) { + nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret); + return ret; + } + + /* Boot. */ + ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0); + nvkm_falcon_fw_dtor(&fw); + if (ret) + return ret; + + return 0; +} + +int +nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + int ret; + u32 err; + + ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB); + if (ret) + return ret; + + /* Verify. */ + err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff; + if (err) { + nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err); + return -EIO; + } + + return 0; +} + +int +nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + int ret; + u32 err, wpr2_lo, wpr2_hi; + + ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS); + if (ret) + return ret; + + /* Verify. */ + err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16; + if (err) { + nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err); + return -EIO; + } + + wpr2_lo = nvkm_rd32(device, 0x1fa824); + wpr2_hi = nvkm_rd32(device, 0x1fa828); + nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c new file mode 100644 index 0000000000..223f68b532 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Red Hat Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static const struct nvkm_falcon_func +ga100_gsp_flcn = { + .disable = gm200_flcn_disable, + .enable = gm200_flcn_enable, + .addr2 = 0x1000, + .riscv_irqmask = 0x2b4, + .reset_eng = gp102_flcn_reset_eng, + .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing, + .bind_inst = gm200_flcn_bind_inst, + .bind_stat = gm200_flcn_bind_stat, + .bind_intr = true, + .imem_pio = &gm200_flcn_imem_pio, + .dmem_pio = &gm200_flcn_dmem_pio, + .riscv_active = tu102_flcn_riscv_active, + .intr_retrigger = ga100_flcn_intr_retrigger, +}; + +static const struct nvkm_gsp_func +ga100_gsp_r535_113_01 = { + .flcn = &ga100_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_ga100", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +ga100_gsps[] = { + { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index a3996ceca9..4c4b4168a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -21,33 +21,165 @@ */ #include "priv.h" -static const struct nvkm_falcon_func +#include <nvfw/flcn.h> +#include <nvfw/fw.h> +#include <nvfw/hs.h> + +int +ga102_gsp_reset(struct nvkm_gsp *gsp) +{ + int ret; + + ret = gsp->falcon.func->reset_eng(&gsp->falcon); + if (ret) + return ret; + + nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111); + return 0; +} + +int +ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header_v2 *hshdr; + const struct nvfw_hs_load_header_v2 *lhdr; + u32 loc, sig, cnt, *meta; + int ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + hshdr = 
nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset); + meta = (u32 *)(blob->data + hshdr->meta_data_offset); + loc = *(u32 *)(blob->data + hshdr->patch_loc); + sig = *(u32 *)(blob->data + hshdr->patch_sig); + cnt = *(u32 *)(blob->data + hshdr->num_sig); + + ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true, + blob->data + hdr->data_offset, hdr->data_size, falcon, fw); + if (ret) + goto done; + + ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data, + cnt, hshdr->sig_prod_offset + sig, 0, 0); + if (ret) + goto done; + + lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset); + + fw->imem_base_img = lhdr->app[0].offset; + fw->imem_base = 0; + fw->imem_size = lhdr->app[0].size; + + fw->dmem_base_img = lhdr->os_data_offset; + fw->dmem_base = 0; + fw->dmem_size = lhdr->os_data_size; + fw->dmem_sign = loc - lhdr->os_data_offset; + + fw->boot_addr = lhdr->app[0].offset; + + fw->fuse_ver = meta[0]; + fw->engine_id = meta[1]; + fw->ucode_id = meta[2]; + +done: + if (ret) + nvkm_falcon_fw_dtor(fw); + + return ret; +} + +static int +ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src) +{ + struct nvkm_falcon *falcon = fw->falcon; + struct nvkm_device *device = falcon->owner->device; + u32 sig_fuse_version = fw->fuse_ver; + u32 reg_fuse_version; + int idx = 0; + + FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id); + FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version); + + if (fw->engine_id & 0x00000400) { + reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4); + } else { + WARN_ON(1); + return -ENOSYS; + } + + FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version); + reg_fuse_version = BIT(fls(reg_fuse_version)); + FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version); + if (!(reg_fuse_version & fw->fuse_ver)) + return -EINVAL; + + while (!(reg_fuse_version & sig_fuse_version & 1)) { + idx += (sig_fuse_version & 1); + reg_fuse_version >>= 1; + sig_fuse_version >>= 1; + } + + return idx; +} + +const struct nvkm_falcon_fw_func +ga102_gsp_fwsec = { + .signature = ga102_gsp_fwsec_signature, + .reset = gm200_flcn_fw_reset, + .load = ga102_flcn_fw_load, + .boot = ga102_flcn_fw_boot, +}; + +const struct nvkm_falcon_func ga102_gsp_flcn = { .disable = gm200_flcn_disable, .enable = gm200_flcn_enable, .select = ga102_flcn_select, .addr2 = 0x1000, + .riscv_irqmask = 0x528, .reset_eng = gp102_flcn_reset_eng, .reset_prep = ga102_flcn_reset_prep, .reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing, .imem_dma = &ga102_flcn_dma, .dmem_dma = &ga102_flcn_dma, + .riscv_active = ga102_flcn_riscv_active, + .intr_retrigger = ga100_flcn_intr_retrigger, }; static const struct nvkm_gsp_func -ga102_gsp = { +ga102_gsp_r535_113_01 = { .flcn = &ga102_gsp_flcn, + .fwsec = &ga102_gsp_fwsec, + + .sig_section = ".fwsignature_ga10x", + + .wpr_heap.os_carveout_size = 20 << 20, + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 84 << 20, + + .booter.ctor = ga102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = ga102_gsp_reset, + + .rm = &r535_gsp_rm, }; -static int -ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) -{ - return 0; -} +static const struct nvkm_gsp_func +ga102_gsp = { + .flcn = &ga102_gsp_flcn, +}; static struct nvkm_gsp_fwif ga102_gsps[] = { - { -1, ga102_gsp_nofw, &ga102_gsp }, + { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, 
&ga102_gsp }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index da6a809cd3..62d9289bca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -34,12 +34,12 @@ gv100_gsp_flcn = { .dmem_pio = &gm200_flcn_dmem_pio, }; -static const struct nvkm_gsp_func +const struct nvkm_gsp_func gv100_gsp = { .flcn = &gv100_gsp_flcn, }; -static int +int gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 89749a4020..9f4a62375a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -4,16 +4,67 @@ #include <subdev/gsp.h> enum nvkm_acr_lsf_id; -struct nvkm_gsp_func { - const struct nvkm_falcon_func *flcn; -}; +int nvkm_gsp_fwsec_frts(struct nvkm_gsp *); +int nvkm_gsp_fwsec_sb(struct nvkm_gsp *); struct nvkm_gsp_fwif { int version; int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); const struct nvkm_gsp_func *func; + const char *ver; + bool enable; }; +int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); +int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + +struct nvkm_gsp_func { + const struct nvkm_falcon_func *flcn; + const struct nvkm_falcon_fw_func *fwsec; + + char *sig_section; + + struct { + u32 os_carveout_size; + u32 base_size; + u64 min_size; + } wpr_heap; + + struct { + int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); + } booter; + + void (*dtor)(struct nvkm_gsp *); + int (*oneinit)(struct nvkm_gsp *); + int (*init)(struct nvkm_gsp *); + int (*fini)(struct nvkm_gsp *, bool suspend); + int (*reset)(struct nvkm_gsp *); + + const struct nvkm_gsp_rm *rm; +}; + +extern const struct nvkm_falcon_func tu102_gsp_flcn; +extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec; +int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); +int tu102_gsp_oneinit(struct nvkm_gsp *); +int tu102_gsp_reset(struct nvkm_gsp *); + +extern const struct nvkm_falcon_func ga102_gsp_flcn; +extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec; +int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); +int ga102_gsp_reset(struct nvkm_gsp *); + +void r535_gsp_dtor(struct nvkm_gsp *); +int r535_gsp_oneinit(struct nvkm_gsp *); +int r535_gsp_init(struct nvkm_gsp *); +int r535_gsp_fini(struct nvkm_gsp *, bool suspend); +extern const struct nvkm_gsp_rm r535_gsp_rm; + int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); + +extern const struct nvkm_gsp_func gv100_gsp; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c new file mode 100644 index 0000000000..a41735ab60 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -0,0 +1,2355 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/pci.h> +#include <subdev/timer.h> +#include <subdev/vfn.h> +#include <engine/fifo/chan.h> +#include <engine/sec2.h> + +#include <nvfw/fw.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> +#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h> +#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h> +#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h> +#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h> +#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h> +#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h> +#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h> +#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h> +#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h> +#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h> +#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> + +#include <linux/acpi.h> + +#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE +#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16 + +struct r535_gsp_msg { + u8 auth_tag_buffer[16]; + u8 aad_buffer[16]; + u32 checksum; + u32 sequence; + u32 elem_count; + u32 pad; + u8 data[]; +}; + +#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) + +static int +r535_rpc_status_to_errno(uint32_t rpc_status) +{ + switch (rpc_status) { + case 0x55: /* NV_ERR_NOT_READY */ + case 0x66: /* NV_ERR_TIMEOUT_RETRY */ + return -EAGAIN; + case 0x51: /* NV_ERR_NO_MEMORY */ + return -ENOMEM; + default: + return -EINVAL; + } +} + +static void * +r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime) +{ + 
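/* rptr and wptr count whole GSP_PAGE_SIZE pages, wrapping at gsp->msgq.cnt; callers either peek the next message in place (prepc != NULL) or get a kvmalloc'd copy. */ +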
struct r535_gsp_msg *mqe; + u32 size, rptr = *gsp->msgq.rptr; + int used; + u8 *msg; + u32 len; + + size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE); + if (WARN_ON(!size || size >= gsp->msgq.cnt)) + return ERR_PTR(-EINVAL); + + do { + u32 wptr = *gsp->msgq.wptr; + + used = wptr + gsp->msgq.cnt - rptr; + if (used >= gsp->msgq.cnt) + used -= gsp->msgq.cnt; + if (used >= size) + break; + + usleep_range(1, 2); + } while (--(*ptime)); + + if (WARN_ON(!*ptime)) + return ERR_PTR(-ETIMEDOUT); + + mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000); + + if (prepc) { + *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe); + return mqe->data; + } + + msg = kvmalloc(repc, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); + len = min_t(u32, repc, len); + memcpy(msg, mqe->data, len); + + rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE); + if (rptr == gsp->msgq.cnt) + rptr = 0; + + repc -= len; + + if (repc) { + mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); + memcpy(msg + len, mqe, repc); + + rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE); + } + + mb(); + (*gsp->msgq.rptr) = rptr; + return msg; +} + +static void * +r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime) +{ + return r535_gsp_msgq_wait(gsp, repc, NULL, ptime); +} + +static int +r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv) +{ + struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data); + struct r535_gsp_msg *cqe; + u32 argc = cmd->checksum; + u64 *ptr = (void *)cmd; + u64 *end; + u64 csum = 0; + int free, time = 1000000; + u32 wptr, size; + u32 off = 0; + + argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE); + + end = (u64 *)((char *)ptr + argc); + cmd->pad = 0; + cmd->checksum = 0; + cmd->sequence = gsp->cmdq.seq++; + cmd->elem_count = DIV_ROUND_UP(argc, 0x1000); + + while (ptr < end) + csum ^= *ptr++; + + cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); + + wptr = *gsp->cmdq.wptr; + do { + do { + free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; + if (free >= gsp->cmdq.cnt) + free -= gsp->cmdq.cnt; + if (free >= 1) + break; + + usleep_range(1, 2); + } while(--time); + + if (WARN_ON(!time)) { + kvfree(cmd); + return -ETIMEDOUT; + } + + cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); + size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE); + memcpy(cqe, (u8 *)cmd + off, size); + + wptr += DIV_ROUND_UP(size, 0x1000); + if (wptr == gsp->cmdq.cnt) + wptr = 0; + + off += size; + argc -= size; + } while(argc); + + nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); + wmb(); + (*gsp->cmdq.wptr) = wptr; + mb(); + + nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); + + kvfree(cmd); + return 0; +} + +static void * +r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc) +{ + struct r535_gsp_msg *cmd; + u32 size = GSP_MSG_HDR_SIZE + argc; + + size = ALIGN(size, GSP_MSG_MIN_SIZE); + cmd = kvzalloc(size, GFP_KERNEL); + if (!cmd) + return ERR_PTR(-ENOMEM); + + cmd->checksum = argc; + return cmd->data; +} + +struct nvfw_gsp_rpc { + u32 header_version; + u32 signature; + u32 length; + u32 function; + u32 rpc_result; + u32 rpc_result_private; + u32 sequence; + union { + u32 spare; + u32 cpuRmGfid; + }; + u8 data[]; +}; + +static void +r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) +{ + kvfree(msg); +} + +static void +r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) +{ + if (gsp->subdev.debug >= lvl) { + nvkm_printk__(&gsp->subdev, lvl, info, + "msg fn:%d 
len:0x%x/0x%zx res:0x%x resp:0x%x\n", + msg->function, msg->length, msg->length - sizeof(*msg), + msg->rpc_result, msg->rpc_result_private); + print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, + msg->data, msg->length - sizeof(*msg), true); + } +} + +static struct nvfw_gsp_rpc * +r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvfw_gsp_rpc *msg; + int time = 4000000, i; + u32 size; + +retry: + msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time); + if (IS_ERR_OR_NULL(msg)) + return msg; + + msg = r535_gsp_msgq_recv(gsp, msg->length, &time); + if (IS_ERR_OR_NULL(msg)) + return msg; + + if (msg->rpc_result) { + r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, msg); + return ERR_PTR(-EINVAL); + } + + r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE); + + if (fn && msg->function == fn) { + if (repc) { + if (msg->length < sizeof(*msg) + repc) { + nvkm_error(subdev, "msg len %d < %zd\n", + msg->length, sizeof(*msg) + repc); + r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, msg); + return ERR_PTR(-EIO); + } + + return msg; + } + + r535_gsp_msg_done(gsp, msg); + return NULL; + } + + for (i = 0; i < gsp->msgq.ntfy_nr; i++) { + struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; + + if (ntfy->fn == msg->function) { + if (ntfy->func) + ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); + break; + } + } + + if (i == gsp->msgq.ntfy_nr) + r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN); + + r535_gsp_msg_done(gsp, msg); + if (fn) + goto retry; + + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + goto retry; + + return NULL; +} + +static int +r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) +{ + int ret = 0; + + mutex_lock(&gsp->msgq.mutex); + if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { + ret = -ENOSPC; + } else { + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; + gsp->msgq.ntfy_nr++; + } + mutex_unlock(&gsp->msgq.mutex); + return ret; +} + +static int +r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) +{ + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + repv = r535_gsp_msg_recv(gsp, fn, 0); + mutex_unlock(&gsp->cmdq.mutex); + if (IS_ERR(repv)) + return PTR_ERR(repv); + + return 0; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +{ + struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); + struct nvfw_gsp_rpc *msg; + u32 fn = rpc->function; + void *repv = NULL; + int ret; + + if (gsp->subdev.debug >= NV_DBG_TRACE) { + nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, + rpc->length, rpc->length - sizeof(*rpc)); + print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, + rpc->data, rpc->length - sizeof(*rpc), true); + } + + ret = r535_gsp_cmdq_push(gsp, rpc); + if (ret) + return ERR_PTR(ret); + + if (wait) { + msg = r535_gsp_msg_recv(gsp, fn, repc); + if (!IS_ERR_OR_NULL(msg)) + repv = msg->data; + else + repv = msg; + } + + return repv; +} + +static void +r535_gsp_event_dtor(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + + mutex_lock(&gsp->client_id.mutex); + if (event->func) { + list_del(&event->head); + event->func = NULL; + } + mutex_unlock(&gsp->client_id.mutex); + + nvkm_gsp_rm_free(&event->object); + 
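/* Event unlinked and RM object freed above; clear the device back-pointer last. */ +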
event->device = NULL; +} + +static int +r535_gsp_device_event_get(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->event = event->id; + ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); +} + +static int +r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, + nvkm_gsp_event_func func, struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + NV0005_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, + NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), + &event->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hParentClient = client->object.handle; + args->hSrcResource = 0; + args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + args->notifyIndex = NV01_EVENT_CLIENT_RM | id; + args->data = NULL; + + ret = nvkm_gsp_rm_alloc_wr(&event->object, args); + if (ret) + return ret; + + event->device = device; + event->id = id; + + ret = r535_gsp_device_event_get(event); + if (ret) { + nvkm_gsp_event_dtor(event); + return ret; + } + + mutex_lock(&gsp->client_id.mutex); + event->func = func; + list_add(&event->head, &client->events); + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +static void +r535_gsp_device_dtor(struct nvkm_gsp_device *device) +{ + nvkm_gsp_rm_free(&device->subdevice); + nvkm_gsp_rm_free(&device->object); +} + +static int +r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) +{ + NV2080_ALLOC_PARAMETERS *args; + + return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), + &device->subdevice); +} + +static int +r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) +{ + NV0080_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), + &device->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClientShare = client->object.handle; + + ret = nvkm_gsp_rm_alloc_wr(&device->object, args); + if (ret) + return ret; + + ret = r535_gsp_subdevice_ctor(device); + if (ret) + nvkm_gsp_rm_free(&device->object); + + return ret; +} + +static void +r535_gsp_client_dtor(struct nvkm_gsp_client *client) +{ + struct nvkm_gsp *gsp = client->gsp; + + nvkm_gsp_rm_free(&client->object); + + mutex_lock(&gsp->client_id.mutex); + idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); + mutex_unlock(&gsp->client_id.mutex); + + client->gsp = NULL; +} + +static int +r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +{ + NV0000_ALLOC_PARAMETERS *args; + int ret; + + mutex_lock(&gsp->client_id.mutex); + ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); + mutex_unlock(&gsp->client_id.mutex); + if (ret < 0) + return ret; + + client->gsp = gsp; + client->object.client = client; + INIT_LIST_HEAD(&client->events); + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) { + r535_gsp_client_dtor(client); + return ret; + } + + args->hClient = client->object.handle; + args->processID = ~0; + + ret = nvkm_gsp_rm_alloc_wr(&client->object, args); + if (ret) { + 
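/* NV01_ROOT allocation failed on the GSP side; the dtor releases the RM object and the IDR slot. */ +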
r535_gsp_client_dtor(client); + return ret; + } + + return 0; +} + +static int +r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_free_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", + client->object.handle, object->handle); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->params.hRoot = client->object.handle; + rpc->params.hObjectParent = 0; + rpc->params.hObjectOld = object->handle; + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +static void +r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params); + + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static void * +r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params); + struct nvkm_gsp *gsp = object->client->gsp; + void *ret; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + if (rpc->status) { + ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); + if (PTR_ERR(ret) != -EAGAIN) + nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); + } else { + ret = repc ? rpc->params : NULL; + } + + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_alloc_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n", + client->object.handle, object->parent->handle, object->handle, oclass, argc); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hParent = object->parent->handle; + rpc->hObject = object->handle; + rpc->hClass = oclass; + rpc->status = 0; + rpc->paramsSize = argc; + return rpc->params; +} + +static void +r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) +{ + rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); + + if (!repv) + return; + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static int +r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc) +{ + rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params); + struct nvkm_gsp *gsp = object->client->gsp; + int ret = 0; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); + if (IS_ERR_OR_NULL(rpc)) { + *argv = NULL; + return PTR_ERR(rpc); + } + + if (rpc->status) { + ret = r535_rpc_status_to_errno(rpc->status); + if (ret != -EAGAIN) + nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", + object->client->object.handle, object->handle, rpc->cmd, rpc->status); + } + + if (repc) + *argv = rpc->params; + else + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_control_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n", + client->object.handle, object->handle, cmd, argc); + + rpc = nvkm_gsp_rpc_get(gsp, 
NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hObject = object->handle; + rpc->cmd = cmd; + rpc->status = 0; + rpc->paramsSize = argc; + return rpc->params; +} + +static void +r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) +{ + struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); + + r535_gsp_msg_done(gsp, rpc); +} + +static void * +r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) +{ + struct nvfw_gsp_rpc *rpc; + + rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); + if (IS_ERR(rpc)) + return ERR_CAST(rpc); + + rpc->header_version = 0x03000000; + rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; + rpc->function = fn; + rpc->rpc_result = 0xffffffff; + rpc->rpc_result_private = 0xffffffff; + rpc->length = sizeof(*rpc) + argc; + return rpc->data; +} + +static void * +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +{ + struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); + struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data); + const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg); + const u32 max_rpc_size = max_msg_size - sizeof(*rpc); + u32 rpc_size = rpc->length - sizeof(*rpc); + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + if (rpc_size > max_rpc_size) { + const u32 fn = rpc->function; + + /* Adjust length, and send initial RPC. */ + rpc->length = sizeof(*rpc) + max_rpc_size; + cmd->checksum = rpc->length; + + repv = r535_gsp_rpc_send(gsp, argv, false, 0); + if (IS_ERR(repv)) + goto done; + + argv += max_rpc_size; + rpc_size -= max_rpc_size; + + /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ + while (rpc_size) { + u32 size = min(rpc_size, max_rpc_size); + void *next; + + next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); + if (IS_ERR(next)) { + repv = next; + goto done; + } + + memcpy(next, argv, size); + + repv = r535_gsp_rpc_send(gsp, next, false, 0); + if (IS_ERR(repv)) + goto done; + + argv += size; + rpc_size -= size; + } + + /* Wait for reply. 
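GSP-RM sends a single reply for the whole chunked sequence, tagged with the function code of the initial RPC, which is why 'fn' was captured before the first send.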
*/ + if (wait) { + rpc = r535_gsp_msg_recv(gsp, fn, repc); + if (!IS_ERR_OR_NULL(rpc)) + repv = rpc->data; + else + repv = rpc; + } else { + repv = NULL; + } + } else { + repv = r535_gsp_rpc_send(gsp, argv, wait, repc); + } + +done: + mutex_unlock(&gsp->cmdq.mutex); + return repv; +} + +const struct nvkm_gsp_rm +r535_gsp_rm = { + .rpc_get = r535_gsp_rpc_get, + .rpc_push = r535_gsp_rpc_push, + .rpc_done = r535_gsp_rpc_done, + + .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, + .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, + .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, + + .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, + .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, + .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, + + .rm_free = r535_gsp_rpc_rm_free, + + .client_ctor = r535_gsp_client_ctor, + .client_dtor = r535_gsp_client_dtor, + + .device_ctor = r535_gsp_device_ctor, + .device_dtor = r535_gsp_device_dtor, + + .event_ctor = r535_gsp_device_event_ctor, + .event_dtor = r535_gsp_event_dtor, +}; + +static void +r535_gsp_msgq_work(struct work_struct *work) +{ + struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); + + mutex_lock(&gsp->cmdq.mutex); + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + r535_gsp_msg_recv(gsp, 0, 0); + mutex_unlock(&gsp->cmdq.mutex); +} + +static irqreturn_t +r535_gsp_intr(struct nvkm_inth *inth) +{ + struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); + struct nvkm_subdev *subdev = &gsp->subdev; + u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); + u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + + gsp->falcon.func->riscv_irqmask); + u32 stat = intr & inte; + + if (!stat) { + nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); + return IRQ_NONE; + } + + if (stat & 0x00000040) { + nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); + schedule_work(&gsp->msgq.work); + stat &= ~0x00000040; + } + + if (stat) { + nvkm_error(subdev, "intr %08x\n", stat); + nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); + nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); + } + + nvkm_falcon_intr_retrigger(&gsp->falcon); + return IRQ_HANDLED; +} + +static int +r535_gsp_intr_get_table(struct nvkm_gsp *gsp) +{ + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; + int ret = 0; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (WARN_ON(ret)) { + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; + } + + for (unsigned i = 0; i < ctrl->tableLen; i++) { + enum nvkm_subdev_type type; + int inst; + + nvkm_debug(&gsp->subdev, + "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, + ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, + ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); + + switch (ctrl->table[i].engineIdx) { + case MC_ENGINE_IDX_GSP: + type = NVKM_SUBDEV_GSP; + inst = 0; + break; + case MC_ENGINE_IDX_DISP: + type = NVKM_ENGINE_DISP; + inst = 0; + break; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: + type = NVKM_ENGINE_CE; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; + break; + case MC_ENGINE_IDX_GR0: + type = NVKM_ENGINE_GR; + inst = 0; + break; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + type = NVKM_ENGINE_NVDEC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; + break; + case MC_ENGINE_IDX_MSENC ... 
MC_ENGINE_IDX_MSENC2: + type = NVKM_ENGINE_NVENC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; + break; + case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: + type = NVKM_ENGINE_NVJPG; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; + break; + case MC_ENGINE_IDX_OFA0: + type = NVKM_ENGINE_OFA; + inst = 0; + break; + default: + continue; + } + + if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { + ret = -ENOSPC; + break; + } + + gsp->intr[gsp->intr_nr].type = type; + gsp->intr[gsp->intr_nr].inst = inst; + gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; + gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; + gsp->intr_nr++; + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; +} + +static int +r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + int last_usable = -1; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { + NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = + &rpc->fbRegionInfoParams.fbRegion[i]; + + nvkm_debug(&gsp->subdev, "fb region %d: " + "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, + reg->base, reg->limit, reg->reserved, reg->performance, + reg->supportCompressed, reg->supportISO, reg->bProtected); + + if (!reg->reserved && !reg->bProtected) { + if (reg->supportCompressed && reg->supportISO && + !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { + const u64 size = (reg->limit + 1) - reg->base; + + gsp->fb.region[gsp->fb.region_nr].addr = reg->base; + gsp->fb.region[gsp->fb.region_nr].size = size; + gsp->fb.region_nr++; + } + + last_usable = i; + } + } + + if (last_usable >= 0) { + u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; + + gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; + } + + for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { + if (rpc->gpcInfo.gpcMask & BIT(gpc)) { + gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); + gsp->gr.gpcs++; + } + } + + nvkm_gsp_rpc_done(gsp, rpc); + return 0; +} + +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. 
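+ * A stray access will then see a conspicuous 0xff pattern rather than stale, plausible-looking data.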
+ */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); + memset(mem, 0, sizeof(*mem)); + } +} + +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->size = size; + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + return 0; +} + +static int +r535_gsp_postinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + int ret; + + ret = r535_gsp_rpc_get_gsp_static_info(gsp); + if (WARN_ON(ret)) + return ret; + + INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); + + ret = r535_gsp_intr_get_table(gsp); + if (WARN_ON(ret)) + return ret; + + ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); + if (WARN_ON(ret < 0)) + return ret; + + ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, + r535_gsp_intr, &gsp->subdev.inth); + if (WARN_ON(ret)) + return ret; + + nvkm_inth_allow(&gsp->subdev.inth); + nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); + nvkm_gsp_mem_dtor(gsp, &gsp->libos); + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); + + return ret; +} + +static int +r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) +{ + rpc_unloading_guest_driver_v1F_07 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + if (suspend) { + rpc->bInPMTransition = 1; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + } else { + rpc->bInPMTransition = 0; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; + } + + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +/* dword only */ +struct nv_gsp_registry_entries { + const char *name; + u32 value; +}; + +static const struct nv_gsp_registry_entries r535_registry_entries[] = { + { "RMSecBusResetEnable", 1 }, + { "RMForcePcieConfigSave", 1 }, +}; +#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) + +static int +r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) +{ + PACKED_REGISTRY_TABLE *rpc; + char *strings; + int str_offset; + int i; + size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES); + + /* add strings + null terminator */ + for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) + rpc_size += strlen(r535_registry_entries[i].name) + 1; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; + + str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); + strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; + for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { + int name_len = strlen(r535_registry_entries[i].name) + 1; + + rpc->entries[i].nameOffset = str_offset; + rpc->entries[i].type = 1; + rpc->entries[i].data = r535_registry_entries[i].value; + rpc->entries[i].length = 4; + memcpy(strings, r535_registry_entries[i].name, name_len); + strings += name_len; + str_offset += name_len; + } + rpc->size = str_offset; + + return nvkm_gsp_rpc_wr(gsp, rpc, false); +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) +static void +r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) +{ + const guid_t NVOP_DSM_GUID = + GUID_INIT(0xA486D8F8, 0x0BDA, 
0x471B, + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); + u64 NVOP_DSM_REV = 0x00000100; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 4, + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + caps->status = 0xffff; + + if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) + return; + + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + caps->status = 0; + caps->optimusCaps = *(u32 *)obj->buffer.pointer; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static void +r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) +{ + const guid_t JT_DSM_GUID = + GUID_INIT(0xCBECA351L, 0x067B, 0x4924, + 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); + u64 JT_DSM_REV = 0x00000103; + u32 caps; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(caps), + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + jt->status = 0xffff; + + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + jt->status = 0; + jt->jtCaps = *(u32 *)obj->buffer.pointer; + jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; + jt->bSBIOSCaps = 0; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static void +r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, + MUX_METHOD_DATA_ELEMENT *part) +{ + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; + acpi_handle iter = NULL, handle_mux = NULL; + acpi_status status; + unsigned long long value; + + mode->status = 0xffff; + part->status = 0xffff; + + do { + status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); + if (ACPI_FAILURE(status) || !iter) + return; + + status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); + if (ACPI_FAILURE(status) || value != id) + continue; + + handle_mux = iter; + } while (!handle_mux); + + if (!handle_mux) + return; + + /* I -think- 0 means "acquire" according to nvidia's driver source */ + input.pointer->integer.type = ACPI_TYPE_INTEGER; + input.pointer->integer.value = 0; + + status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); + if (ACPI_SUCCESS(status)) { + mode->acpiId = id; + mode->mode = value; + mode->status = 0; + } + + status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); + if (ACPI_SUCCESS(status)) { + part->acpiId = id; + part->mode = value; + part->status = 0; + } +} + +static void +r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) +{ + mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); + + for (int i = 0; i < mux->tableLen; i++) { + r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], + &mux->acpiIdMuxPartTable[i]); + } +} + +static void +r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *_DOD; + + dod->status = 0xffff; + + status = acpi_evaluate_object(handle, "_DOD", NULL, &output); + if (ACPI_FAILURE(status)) + return; + + _DOD = output.pointer; + + if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || + WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) + return; + + for (int i = 0; i 
< _DOD->package.count; i++) { + if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) + return; + + dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; + dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); + } + + dod->status = 0; + kfree(output.pointer); +} +#endif + +static void +r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) +{ +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); + + if (!handle) + return; + + acpi->bValid = 1; + + r535_gsp_acpi_dod(handle, &acpi->dodMethodData); + if (acpi->dodMethodData.status == 0) + r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); + + r535_gsp_acpi_jt(handle, &acpi->jtMethodData); + r535_gsp_acpi_caps(handle, &acpi->capsMethodData); +#endif +} + +static int +r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); + GspSystemInfo *info; + + if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) + return -ENOSYS; + + info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->gpuPhysAddr = device->func->resource_addr(device, 0); + info->gpuPhysFbAddr = device->func->resource_addr(device, 1); + info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); + info->maxUserVa = TASK_SIZE; + info->pciConfigMirrorBase = 0x088000; + info->pciConfigMirrorSize = 0x001000; + r535_gsp_acpi_info(gsp, &info->acpiMethodData); + + return nvkm_gsp_rpc_wr(gsp, info, false); +} + +static int +r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_os_error_log_v17_00 *msg = repv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); + return 0; +} + +static int +r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_chan *chan; + unsigned long flags; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", + msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, + msg->partitionAttributionId); + + chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); + if (!chan) { + nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); + return 0; + } + + nvkm_chan_error(chan, false); + nvkm_chan_put(&chan, flags); + return 0; +} + +static int +r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + + WARN_ON(repc != 0); + + nvkm_error(subdev, "mmu fault queued\n"); + return 0; +} + +static int +r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_gsp_client *client; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_post_event_v17_00 *msg = repv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) + return -EINVAL; + + nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", + msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, + msg->status, msg->eventDataSize, 
msg->bNotifyList); + + mutex_lock(&gsp->client_id.mutex); + client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); + if (client) { + struct nvkm_gsp_event *event; + bool handled = false; + + list_for_each_entry(event, &client->events, head) { + if (event->object.handle == msg->hEvent) { + event->func(event, msg->eventData, msg->eventDataSize); + handled = true; + } + } + + if (!handled) { + nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", + msg->hClient, msg->hEvent); + } + } else { + nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); + } + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +/** + * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP + * + * The GSP sequencer is a list of I/O commands that the GSP can send to + * the driver to perform for various purposes. The most common usage is to + * perform a special mid-initialization reset. + */ +static int +r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + rpc_run_cpu_sequencer_v17_00 *seq = repv; + int ptr = 0, ret; + + nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); + + while (ptr < seq->cmdIndex) { + GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; + + ptr += 1; + ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); + + switch (cmd->opCode) { + case GSP_SEQ_BUF_OPCODE_REG_WRITE: { + u32 addr = cmd->payload.regWrite.addr; + u32 data = cmd->payload.regWrite.val; + + nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); + nvkm_wr32(device, addr, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { + u32 addr = cmd->payload.regModify.addr; + u32 mask = cmd->payload.regModify.mask; + u32 data = cmd->payload.regModify.val; + + nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); + nvkm_mask(device, addr, mask, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_POLL: { + u32 addr = cmd->payload.regPoll.addr; + u32 mask = cmd->payload.regPoll.mask; + u32 data = cmd->payload.regPoll.val; + u32 usec = cmd->payload.regPoll.timeout ?: 4000000; + //u32 error = cmd->payload.regPoll.error; + + nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); + nvkm_rd32(device, addr); + nvkm_usec(device, usec, + if ((nvkm_rd32(device, addr) & mask) == data) + break; + ); + } + break; + case GSP_SEQ_BUF_OPCODE_DELAY_US: { + u32 usec = cmd->payload.delayUs.val; + + nvkm_trace(subdev, "seq usec %d\n", usec); + udelay(usec); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_STORE: { + u32 addr = cmd->payload.regStore.addr; + u32 slot = cmd->payload.regStore.index; + + seq->regSaveArea[slot] = nvkm_rd32(device, addr); + nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, + seq->regSaveArea[slot]); + } + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESET: + nvkm_trace(subdev, "seq core reset\n"); + nvkm_falcon_reset(&gsp->falcon); + nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); + nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); + break; + case GSP_SEQ_BUF_OPCODE_CORE_START: + nvkm_trace(subdev, "seq core start\n"); + if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) + nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); + else + nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); + break; + case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: + nvkm_trace(subdev, "seq core wait halt\n"); + nvkm_msec(device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 
0x100) & 0x00000010) + break; + ); + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { + struct nvkm_sec2 *sec2 = device->sec2; + u32 mbox0; + + nvkm_trace(subdev, "seq core resume\n"); + + ret = gsp->func->reset(gsp); + if (WARN_ON(ret)) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + + nvkm_falcon_start(&sec2->falcon); + + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x1180f8) & 0x04000000) + break; + ) < 0) + return -ETIMEDOUT; + + mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); + if (WARN_ON(mbox0)) { + nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); + return -EIO; + } + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + } + break; + default: + nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode); + return -EINVAL; + } + } + + return 0; +} + +static int +r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + u32 wpr2_hi; + int ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (!wpr2_hi) { + nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); + return 0; + } + + ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (WARN_ON(ret)) + return ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (WARN_ON(wpr2_hi)) + return -EIO; + + return 0; +} + +static int +r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + int ret; + + ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (ret) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + + return 0; +} + +static int +r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); + if (ret) + return ret; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; + meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; + + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->gspFwRsvdStart = gsp->fb.heap.addr; + meta->nonWprHeapOffset = gsp->fb.heap.addr; + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwWprStart = gsp->fb.wpr2.addr; + meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; + meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; + meta->gspFwOffset = gsp->fb.wpr2.elf.addr; + meta->bootBinOffset = gsp->fb.wpr2.boot.addr; + meta->frtsOffset = gsp->fb.wpr2.frts.addr; + meta->frtsSize = gsp->fb.wpr2.frts.size; + meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); + meta->fbSize = gsp->fb.size; + meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->bootCount = 0; + meta->partitionRpcAddr = 0; + meta->partitionRpcRequestOffset = 0; + meta->partitionRpcReplyOffset = 0; + meta->verified = 0; + return 0; +} + 
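+/*
+ * Roughly, the offsets filled in above describe this VRAM layout (the
+ * actual placement is computed earlier, in the oneinit path):
+ *
+ *   fbSize ..................... top of VRAM
+ *     vgaWorkspaceOffset         VGA workspace
+ *   gspFwWprEnd ................ (aligned down to 128KiB)
+ *     frtsOffset                 FRTS           \
+ *     bootBinOffset              GSP bootloader | WPR2
+ *     gspFwOffset                GSP-RM ELF     | (write-protected)
+ *     gspFwHeapOffset            GSP-RM heap    /
+ *   gspFwWprStart .............. start of WPR2
+ *     nonWprHeapOffset           non-WPR heap (gspFwRsvdStart)
+ */
+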
+static int +r535_gsp_shared_init(struct nvkm_gsp *gsp) +{ + struct { + msgqTxHeader tx; + msgqRxHeader rx; + } *cmdq, *msgq; + int ret, i; + + gsp->shm.cmdq.size = 0x40000; + gsp->shm.msgq.size = 0x40000; + + gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; + gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + + ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + + gsp->shm.cmdq.size + + gsp->shm.msgq.size, + &gsp->shm.mem); + if (ret) + return ret; + + gsp->shm.ptes.ptr = gsp->shm.mem.data; + gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; + gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; + + for (i = 0; i < gsp->shm.ptes.nr; i++) + gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); + + cmdq = gsp->shm.cmdq.ptr; + cmdq->tx.version = 0; + cmdq->tx.size = gsp->shm.cmdq.size; + cmdq->tx.entryOff = GSP_PAGE_SIZE; + cmdq->tx.msgSize = GSP_PAGE_SIZE; + cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; + cmdq->tx.writePtr = 0; + cmdq->tx.flags = 1; + cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); + + msgq = gsp->shm.msgq.ptr; + + gsp->cmdq.cnt = cmdq->tx.msgCount; + gsp->cmdq.wptr = &cmdq->tx.writePtr; + gsp->cmdq.rptr = &msgq->rx.readPtr; + gsp->msgq.cnt = cmdq->tx.msgCount; + gsp->msgq.wptr = &msgq->tx.writePtr; + gsp->msgq.rptr = &cmdq->rx.readPtr; + return 0; +} + +static int +r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +{ + GSP_ARGUMENTS_CACHED *args; + int ret; + + if (!resume) { + ret = r535_gsp_shared_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); + if (ret) + return ret; + } + + args = gsp->rmargs.data; + args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; + args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; + args->messageQueueInitArguments.cmdQueueOffset = + (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; + args->messageQueueInitArguments.statQueueOffset = + (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; + + if (!resume) { + args->srInitArguments.oldLevel = 0; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 0; + } else { + args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 1; + } + + return 0; +} + +static inline u64 +r535_gsp_libos_id8(const char *name) +{ + u64 id = 0; + + for (int i = 0; i < sizeof(id) && *name; i++, name++) + id = (id << 8) | *name; + + return id; +} + +/** + * create_pte_array() - creates a PTE array of a physically contiguous buffer + * @ptes: pointer to the array + * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned) + * @size: size of the buffer + * + * GSP-RM sometimes expects physically-contiguous buffers to have an array of + * "PTEs" for each page in that buffer. Although in theory that allows for + * the buffer to be physically discontiguous, GSP-RM does not currently + * support that. + * + * In this case, the PTEs are DMA addresses of each page of the buffer. Since + * the buffer is physically contiguous, calculating all the PTEs is simple + * math. 
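+ * For example, a 12KiB buffer at DMA address 0x100000 yields the PTEs
+ * 0x100000, 0x101000 and 0x102000.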
+ *
+ * See memdescGetPhysAddrsForGpu()
+ */
+static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
+{
+ unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
+ unsigned int i;
+
+ for (i = 0; i < num_pages; i++)
+ ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
+}
+
+/**
+ * r535_gsp_libos_init() - create the libos arguments structure
+ *
+ * The logging buffers are byte queues that contain encoded printf-like
+ * messages from GSP-RM. They need to be decoded by a special application
+ * that can parse the buffers.
+ *
+ * The 'loginit' buffer contains logs from early GSP-RM init and
+ * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
+ * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+ * A third buffer, 'logintr', is allocated and described the same way.
+ *
+ * The physical address map for the log buffer is stored in the buffer
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ *
+ * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
+ * configured for a larger page size (e.g. 64K pages), we need to give
+ * the GSP an array of 4K pages. Fortunately, since the buffer is
+ * physically contiguous, it's simple math to calculate the addresses.
+ *
+ * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
+ * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
+ * buffers to be physically contiguous anyway.
+ *
+ * The memory allocated for the arguments must remain until the GSP sends the
+ * init_done RPC.
+ *
+ * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ */
+static int
+r535_gsp_libos_init(struct nvkm_gsp *gsp)
+{
+ LibosMemoryRegionInitArgument *args;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
+ if (ret)
+ return ret;
+
+ args = gsp->libos.data;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
+ if (ret)
+ return ret;
+
+ args[0].id8 = r535_gsp_libos_id8("LOGINIT");
+ args[0].pa = gsp->loginit.addr;
+ args[0].size = gsp->loginit.size;
+ args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
+ if (ret)
+ return ret;
+
+ args[1].id8 = r535_gsp_libos_id8("LOGINTR");
+ args[1].pa = gsp->logintr.addr;
+ args[1].size = gsp->logintr.size;
+ args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
+ if (ret)
+ return ret;
+
+ args[2].id8 = r535_gsp_libos_id8("LOGRM");
+ args[2].pa = gsp->logrm.addr;
+ args[2].size = gsp->logrm.size;
+ args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
+
+ ret = r535_gsp_rmargs_init(gsp, false);
+ if (ret)
+ return ret;
+
+ args[3].id8 = r535_gsp_libos_id8("RMARGS");
+ args[3].pa = gsp->rmargs.addr;
+ args[3].size = gsp->rmargs.size;
+ args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ return 0;
+}
+
+void
+nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
+{
+ struct scatterlist *sgl;
+ int i;
+
+ dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+
+ for_each_sgtable_sg(sgt, sgl, i)
{
+ struct page *page = sg_page(sgl);
+
+ __free_page(page);
+ }
+
+ sg_free_table(sgt);
+}
+
+int
+nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
+{
+ const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct scatterlist *sgl;
+ int ret, i;
+
+ ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ for_each_sgtable_sg(sgt, sgl, i) {
+ struct page *page = alloc_page(GFP_KERNEL);
+
+ if (!page) {
+ nvkm_gsp_sg_free(device, sgt);
+ return -ENOMEM;
+ }
+
+ sg_set_page(sgl, page, PAGE_SIZE, 0);
+ }
+
+ ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
+ nvkm_gsp_sg_free(device, sgt);
+
+ return ret;
+}
+
+static void
+nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
+{
+ for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
+ nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+}
+
+/**
+ * nvkm_gsp_radix3_sg() - build a radix3 table from an S/G list
+ *
+ * The GSP uses a three-level page table, called radix3, to map the firmware.
+ * Each 64-bit "pointer" in the table is either the bus address of an entry in
+ * the next table (for levels 0 and 1) or the bus address of the next page in
+ * the GSP firmware image itself.
+ *
+ * Level 0 contains a single entry in one page that points to the first page
+ * of level 1.
+ *
+ * Level 1, since it's also only one page in size, contains up to 512 entries,
+ * one for each page in Level 2.
+ *
+ * Level 2 can be up to 512 pages in size, and each of those entries points to
+ * the next page of the firmware image. Since there can be up to 512*512
+ * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
+ *
+ * Internally, the GSP has its own window into system memory, but the base
+ * physical address of the aperture is not 0. In fact, it varies depending on
+ * the GPU architecture. Since the GPU is a PCI device, this window is
+ * accessed via DMA and is therefore bound by IOMMU translation. The end
+ * result is that GSP-RM must translate the bus addresses in the table to GSP
+ * physical addresses. All this should happen transparently.
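+ *
+ * As a worked example, assuming 4KiB GSP pages: a 16MiB firmware image
+ * spans 4096 pages, so level 2 needs 4096 entries (8 pages of PTEs),
+ * level 1 holds 8 entries in a single page, and level 0 holds one.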
+ * + * Returns 0 on success, or negative error code + * + * See kgspCreateRadix3_IMPL + */ +static int +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, + struct nvkm_gsp_radix3 *rx3) +{ + u64 addr; + + for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { + u64 *ptes; + size_t bufsize; + int ret, idx; + + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); + if (ret) + return ret; + + ptes = rx3->mem[i].data; + if (i == 2) { + struct scatterlist *sgl; + + for_each_sgtable_dma_sg(sgt, sgl, idx) { + for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++) + *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j); + } + } else { + for (int j = 0; j < size / GSP_PAGE_SIZE; j++) + *ptes++ = addr + GSP_PAGE_SIZE * j; + } + + size = rx3->mem[i].size; + addr = rx3->mem[i].addr; + } + + return 0; +} + +int +r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + u32 mbox0 = 0xff, mbox1 = 0xff; + int ret; + + if (!gsp->running) + return 0; + + if (suspend) { + GspFwWprMeta *meta = gsp->wpr_meta.data; + u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; + GspFwSRMeta *sr; + + ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); + if (ret) + return ret; + + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); + if (ret) + return ret; + + sr = gsp->sr.meta.data; + sr->magic = GSP_FW_SR_META_MAGIC; + sr->revision = GSP_FW_SR_META_REVISION; + sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr; + sr->sizeOfSuspendResumeData = len; + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); + if (WARN_ON(ret)) + return ret; + + nvkm_msec(gsp->subdev.device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) + break; + ); + + nvkm_falcon_reset(&gsp->falcon); + + ret = nvkm_gsp_fwsec_sb(gsp); + WARN_ON(ret); + + ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); + WARN_ON(ret); + + gsp->running = false; + return 0; +} + +int +r535_gsp_init(struct nvkm_gsp *gsp) +{ + u32 mbox0, mbox1; + int ret; + + if (!gsp->sr.meta.data) { + mbox0 = lower_32_bits(gsp->wpr_meta.addr); + mbox1 = upper_32_bits(gsp->wpr_meta.addr); + } else { + r535_gsp_rmargs_init(gsp, true); + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + /* Execute booter to handle (eventually...) booting GSP-RM. 
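mbox0/mbox1 carry the metadata address chosen above (WPR meta on cold boot, SR meta on resume).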
*/ + ret = r535_gsp_booter_load(gsp, mbox0, mbox1); + if (WARN_ON(ret)) + goto done; + + ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); + if (ret) + goto done; + + gsp->running = true; + +done: + if (gsp->sr.meta.data) { + nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta); + nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); + return ret; + } + + if (ret == 0) + ret = r535_gsp_postinit(gsp); + + return ret; +} + +static int +r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) +{ + const struct firmware *fw = gsp->fws.bl; + const struct nvfw_bin_hdr *hdr; + RM_RISCV_UCODE_DESC *desc; + int ret; + + hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); + desc = (void *)fw->data + hdr->header_offset; + + ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); + if (ret) + return ret; + + memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); + + gsp->boot.code_offset = desc->monitorCodeOffset; + gsp->boot.data_offset = desc->monitorDataOffset; + gsp->boot.manifest_offset = desc->manifestOffset; + gsp->boot.app_version = desc->appVersion; + return 0; +} + +static const struct nvkm_firmware_func +r535_gsp_fw = { + .type = NVKM_FIRMWARE_IMG_SGT, +}; + +static int +r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) +{ + const u8 *img = gsp->fws.rm->data; + const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; + const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; + const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; + + for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { + if (!strcmp(&names[shdr->sh_name], name)) { + *pdata = &img[shdr->sh_offset]; + *psize = shdr->sh_size; + return 0; + } + } + + nvkm_error(&gsp->subdev, "section '%s' not found\n", name); + return -ENOENT; +} + +static void +r535_gsp_dtor_fws(struct nvkm_gsp *gsp) +{ + nvkm_firmware_put(gsp->fws.bl); + gsp->fws.bl = NULL; + nvkm_firmware_put(gsp->fws.booter.unload); + gsp->fws.booter.unload = NULL; + nvkm_firmware_put(gsp->fws.booter.load); + gsp->fws.booter.load = NULL; + nvkm_firmware_put(gsp->fws.rm); + gsp->fws.rm = NULL; +} + +void +r535_gsp_dtor(struct nvkm_gsp *gsp) +{ + idr_destroy(&gsp->client_id.idr); + mutex_destroy(&gsp->client_id.mutex); + + nvkm_gsp_radix3_dtor(gsp, &gsp->radix3); + nvkm_gsp_mem_dtor(gsp, &gsp->sig); + nvkm_firmware_dtor(&gsp->fw); + + nvkm_falcon_fw_dtor(&gsp->booter.unload); + nvkm_falcon_fw_dtor(&gsp->booter.load); + + mutex_destroy(&gsp->msgq.mutex); + mutex_destroy(&gsp->cmdq.mutex); + + r535_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); +} + +int +r535_gsp_oneinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + const u8 *data; + u64 size; + int ret; + + mutex_init(&gsp->cmdq.mutex); + mutex_init(&gsp->msgq.mutex); + + ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, + &device->sec2->falcon, &gsp->booter.load); + if (ret) + return ret; + + ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, + &device->sec2->falcon, &gsp->booter.unload); + if (ret) + return ret; + + /* Load GSP firmware from ELF image into DMA-accessible memory. 
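The NVKM_FIRMWARE_IMG_SGT backing keeps the image in a scatter/gather table, which the radix3 builder below maps for the GSP.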
*/ + ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); + if (ret) + return ret; + + ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); + if (ret) + return ret; + + /* Load relevant signature from ELF image. */ + ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); + if (ret) + return ret; + + memcpy(gsp->sig.data, data, size); + + /* Build radix3 page table for ELF image. */ + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + if (ret) + return ret; + + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, + r535_gsp_msg_run_cpu_sequencer, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, + r535_gsp_msg_rc_triggered, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, + r535_gsp_msg_mmu_fault_queued, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); + ret = r535_gsp_rm_boot_ctor(gsp); + if (ret) + return ret; + + /* Release FW images - we've copied them to DMA buffers now. */ + r535_gsp_dtor_fws(gsp); + + /* Calculate FB layout. */ + gsp->fb.wpr2.frts.size = 0x100000; + gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; + + gsp->fb.wpr2.boot.size = gsp->boot.fw.size; + gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); + + gsp->fb.wpr2.elf.size = gsp->fw.len; + gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); + + { + u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); + + gsp->fb.wpr2.heap.size = + gsp->func->wpr_heap.os_carveout_size + + gsp->func->wpr_heap.base_size + + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + + ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); + + gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); + } + + gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); + gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); + + gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); + gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; + + gsp->fb.heap.size = 0x100000; + gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; + + ret = nvkm_gsp_fwsec_frts(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_libos_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_wpr_meta_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_system_info(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_registry(gsp); + if (WARN_ON(ret)) + return ret; + + /* Reset GSP into RISC-V mode. 
*/ + ret = gsp->func->reset(gsp); + if (WARN_ON(ret)) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + + mutex_init(&gsp->client_id.mutex); + idr_init(&gsp->client_id.idr); + return 0; +} + +static int +r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, + const struct firmware **pfw) +{ + char fwname[64]; + + snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); + return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); +} + +int +r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + int ret; + + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) + return -EINVAL; + + if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || + (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) || + (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) || + (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) { + r535_gsp_dtor_fws(gsp); + return ret; + } + + return 0; +} + +#define NVKM_GSP_FIRMWARE(chip) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin") + +NVKM_GSP_FIRMWARE(tu102); +NVKM_GSP_FIRMWARE(tu104); +NVKM_GSP_FIRMWARE(tu106); + +NVKM_GSP_FIRMWARE(tu116); +NVKM_GSP_FIRMWARE(tu117); + +NVKM_GSP_FIRMWARE(ga100); + +NVKM_GSP_FIRMWARE(ga102); +NVKM_GSP_FIRMWARE(ga103); +NVKM_GSP_FIRMWARE(ga104); +NVKM_GSP_FIRMWARE(ga106); +NVKM_GSP_FIRMWARE(ga107); + +NVKM_GSP_FIRMWARE(ad102); +NVKM_GSP_FIRMWARE(ad103); +NVKM_GSP_FIRMWARE(ad104); +NVKM_GSP_FIRMWARE(ad106); +NVKM_GSP_FIRMWARE(ad107); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c new file mode 100644 index 0000000000..59c5f2b917 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -0,0 +1,198 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <subdev/fb.h> + +#include <nvfw/flcn.h> +#include <nvfw/fw.h> +#include <nvfw/hs.h> + +int +tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header_v2 *hshdr; + const struct nvfw_hs_load_header_v2 *lhdr; + u32 loc, sig, cnt; + int ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset); + loc = *(u32 *)(blob->data + hshdr->patch_loc); + sig = *(u32 *)(blob->data + hshdr->patch_sig); + cnt = *(u32 *)(blob->data + hshdr->num_sig); + + ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true, + blob->data + hdr->data_offset, hdr->data_size, falcon, fw); + if (ret) + goto done; + + ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data, + cnt, hshdr->sig_prod_offset + sig, 0, 0); + if (ret) + goto done; + + lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset); + + fw->nmem_base_img = 0; + fw->nmem_base = lhdr->os_code_offset; + fw->nmem_size = lhdr->os_code_size; + fw->imem_base_img = fw->nmem_size; + fw->imem_base = lhdr->app[0].offset; + fw->imem_size = lhdr->app[0].size; + fw->dmem_base_img = lhdr->os_data_offset; + fw->dmem_base = 0; + fw->dmem_size = lhdr->os_data_size; + fw->dmem_sign = loc - fw->dmem_base_img; + fw->boot_addr = lhdr->os_code_offset; + +done: + if (ret) + nvkm_falcon_fw_dtor(fw); + + return ret; +} + +static int +tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw) +{ + struct flcn_bl_dmem_desc_v2 desc = { + .ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH, + .code_dma_base = fw->fw.phys, + .non_sec_code_off = fw->nmem_base, + .non_sec_code_size = fw->nmem_size, + .sec_code_off = fw->imem_base, + .sec_code_size = fw->imem_size, + .code_entry_point = 0, + .data_dma_base = fw->fw.phys + fw->dmem_base_img, + .data_size = fw->dmem_size, + .argc = 0, + .argv = 0, + }; + + flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc); + + nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005); + + return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0); +} + +const struct nvkm_falcon_fw_func +tu102_gsp_fwsec = { + .reset = gm200_flcn_fw_reset, + .load = gm200_flcn_fw_load, + .load_bld = tu102_gsp_fwsec_load_bld, + .boot = gm200_flcn_fw_boot, +}; + +int +tu102_gsp_reset(struct nvkm_gsp *gsp) +{ + return gsp->falcon.func->reset_eng(&gsp->falcon); +} + +static u64 +tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) +{ + struct nvkm_device *device = gsp->subdev.device; + const u64 base = fb_size - 0x100000; + u64 addr = 0; + + if (device->disp) + addr = nvkm_rd32(gsp->subdev.device, 0x625f04); + if (!(addr & 0x00000008)) + return base; + + addr = (addr & 0xffffff00) << 8; + if (addr < base) + return fb_size - 0x20000; + + return addr; +} + +int +tu102_gsp_oneinit(struct nvkm_gsp *gsp) +{ + gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device); + + gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size); + gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr; + gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr; + gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size; + + return r535_gsp_oneinit(gsp); +} + +const struct nvkm_falcon_func +tu102_gsp_flcn = { + .disable = gm200_flcn_disable, + 
.enable = gm200_flcn_enable, + .addr2 = 0x1000, + .riscv_irqmask = 0x2b4, + .reset_eng = gp102_flcn_reset_eng, + .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing, + .bind_inst = gm200_flcn_bind_inst, + .bind_stat = gm200_flcn_bind_stat, + .bind_intr = true, + .imem_pio = &gm200_flcn_imem_pio, + .dmem_pio = &gm200_flcn_dmem_pio, + .riscv_active = tu102_flcn_riscv_active, +}; + +static const struct nvkm_gsp_func +tu102_gsp_r535_113_01 = { + .flcn = &tu102_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_tu10x", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +tu102_gsps[] = { + { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c new file mode 100644 index 0000000000..04fbd9ed28 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +static const struct nvkm_gsp_func +tu116_gsp_r535_113_01 = { + .flcn = &tu102_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_tu11x", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +tu116_gsps[] = { + { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c index 46917eb600..0494775113 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c @@ -24,6 +24,8 @@ #include "priv.h" #include "pad.h" +#include <subdev/gsp.h> + static void gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable) { @@ -44,5 +46,8 @@ int gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild index 06cbe19ce3..553d540f27 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/instmem/nv04.o nvkm-y += nvkm/subdev/instmem/nv40.o nvkm-y += nvkm/subdev/instmem/nv50.o nvkm-y += nvkm/subdev/instmem/gk20a.o + +nvkm-y += nvkm/subdev/instmem/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index e0e4f97be0..a2cd3330ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -28,7 +28,7 @@ /****************************************************************************** * instmem object base implementation *****************************************************************************/ -static void +void nvkm_instobj_load(struct nvkm_instobj *iobj) { struct nvkm_memory *memory = &iobj->memory; @@ -48,7 +48,7 @@ nvkm_instobj_load(struct nvkm_instobj *iobj) iobj->suspend = NULL; } -static int +int nvkm_instobj_save(struct nvkm_instobj *iobj) { struct nvkm_memory *memory = &iobj->memory; @@ -94,15 +94,21 @@ nvkm_instobj_wrap(struct nvkm_device *device, struct nvkm_memory *memory, struct nvkm_memory **pmemory) { struct nvkm_instmem *imem = device->imem; + int ret; if (!imem->func->memory_wrap) return -ENOSYS; - return imem->func->memory_wrap(imem, memory, pmemory); + ret = imem->func->memory_wrap(imem, memory, pmemory); + if (ret) + return ret; + + container_of(*pmemory, struct nvkm_instobj, memory)->preserve = true; + return 0; } int -nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, +nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, bool preserve, struct nvkm_memory **pmemory) { struct nvkm_subdev *subdev = &imem->subdev; @@ -130,6 +136,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, nvkm_done(memory); } + container_of(memory, 
struct nvkm_instobj, memory)->preserve = preserve; done: if (ret) nvkm_memory_unref(&memory); @@ -172,22 +179,14 @@ static int nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_instmem *imem = nvkm_instmem(subdev); - struct nvkm_instobj *iobj; + int ret; if (suspend) { - list_for_each_entry(iobj, &imem->list, head) { - int ret = nvkm_instobj_save(iobj); - if (ret) - return ret; - } - - nvkm_bar_bar2_fini(subdev->device); + ret = imem->func->suspend(imem); + if (ret) + return ret; - list_for_each_entry(iobj, &imem->boot, head) { - int ret = nvkm_instobj_save(iobj); - if (ret) - return ret; - } + imem->suspend = true; } if (imem->func->fini) @@ -200,20 +199,16 @@ static int nvkm_instmem_init(struct nvkm_subdev *subdev) { struct nvkm_instmem *imem = nvkm_instmem(subdev); - struct nvkm_instobj *iobj; - list_for_each_entry(iobj, &imem->boot, head) { - if (iobj->suspend) - nvkm_instobj_load(iobj); - } + if (imem->suspend) { + if (imem->func->resume) + imem->func->resume(imem); - nvkm_bar_bar2_init(subdev->device); - - list_for_each_entry(iobj, &imem->list, head) { - if (iobj->suspend) - nvkm_instobj_load(iobj); + imem->suspend = false; + return 0; } + nvkm_bar_bar2_init(subdev->device); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index a4ac94a2ab..201022ae92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -49,14 +49,14 @@ #include <subdev/mmu.h> struct gk20a_instobj { - struct nvkm_memory memory; + struct nvkm_instobj base; struct nvkm_mm_node *mn; struct gk20a_instmem *imem; /* CPU mapping */ u32 *vaddr; }; -#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) +#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory) /* * Used for objects allocated using the DMA API @@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj) list_del(&obj->vaddr_node); vunmap(obj->base.vaddr); obj->base.vaddr = NULL; - imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); + imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory); nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use, imem->vaddr_max); } @@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, { struct gk20a_instobj *node = gk20a_instobj(memory); struct nvkm_vmm_map map = { - .memory = &node->memory, + .memory = &node->base.memory, .offset = offset, .mem = node->mn, }; @@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, return -ENOMEM; *_node = &node->base; - nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); - node->base.memory.ptrs = &gk20a_instobj_ptrs; + nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory); + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, @@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, *_node = &node->base; node->dma_addrs = (void *)(node->pages + npages); - nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); - node->base.memory.ptrs = &gk20a_instobj_ptrs; + nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory); + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; /* Allocate backing memory */ for (i = 0; i < npages; i++) { @@ -533,7 +533,7 @@ gk20a_instobj_new(struct 
nvkm_instmem *base, u32 size, u32 align, bool zero, else ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT, align, &node); - *pmemory = node ? &node->memory : NULL; + *pmemory = node ? &node->base.memory : NULL; if (ret) return ret; @@ -564,6 +564,8 @@ gk20a_instmem_dtor(struct nvkm_instmem *base) static const struct nvkm_instmem_func gk20a_instmem = { .dtor = gk20a_instmem_dtor, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .memory_new = gk20a_instobj_new, .zero = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c index 25603b01d6..e5320ef849 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -25,6 +25,7 @@ #include "priv.h" #include <core/ramht.h> +#include <subdev/bar.h> struct nv04_instmem { struct nvkm_instmem base; @@ -154,6 +155,48 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data) nvkm_wr32(imem->subdev.device, 0x700000 + addr, data); } +void +nv04_instmem_resume(struct nvkm_instmem *imem) +{ + struct nvkm_instobj *iobj; + + list_for_each_entry(iobj, &imem->boot, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + nvkm_bar_bar2_init(imem->subdev.device); + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } +} + +int +nv04_instmem_suspend(struct nvkm_instmem *imem) +{ + struct nvkm_instobj *iobj; + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + } + + nvkm_bar_bar2_fini(imem->subdev.device); + + list_for_each_entry(iobj, &imem->boot, head) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + + return 0; +} + static int nv04_instmem_oneinit(struct nvkm_instmem *base) { @@ -210,6 +253,8 @@ static const struct nvkm_instmem_func nv04_instmem = { .dtor = nv04_instmem_dtor, .oneinit = nv04_instmem_oneinit, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .rd32 = nv04_instmem_rd32, .wr32 = nv04_instmem_wr32, .memory_new = nv04_instobj_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 4b2d7465d2..a7f3fc342d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -27,6 +27,7 @@ #include <core/memory.h> #include <subdev/bar.h> #include <subdev/fb.h> +#include <subdev/gsp.h> #include <subdev/mmu.h> struct nv50_instmem { @@ -394,24 +395,44 @@ nv50_instmem_fini(struct nvkm_instmem *base) nv50_instmem(base)->addr = ~0ULL; } +static void * +nv50_instmem_dtor(struct nvkm_instmem *base) +{ + return nv50_instmem(base); +} + static const struct nvkm_instmem_func nv50_instmem = { + .dtor = nv50_instmem_dtor, .fini = nv50_instmem_fini, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .memory_new = nv50_instobj_new, .memory_wrap = nv50_instobj_wrap, .zero = false, }; int -nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_instmem **pimem) +nv50_instmem_new_(const struct nvkm_instmem_func *func, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) { struct nv50_instmem *imem; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base); + nvkm_instmem_ctor(func, device, type, inst, &imem->base); 
INIT_LIST_HEAD(&imem->lru); *pimem = &imem->base; return 0; } + +int +nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_instmem_new(&nv50_instmem, device, type, inst, pimem); + + return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index fe92986a38..4c14c96fb6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -7,6 +7,8 @@ struct nvkm_instmem_func { void *(*dtor)(struct nvkm_instmem *); int (*oneinit)(struct nvkm_instmem *); + int (*suspend)(struct nvkm_instmem *); + void (*resume)(struct nvkm_instmem *); void (*fini)(struct nvkm_instmem *); u32 (*rd32)(struct nvkm_instmem *, u32 addr); void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data); @@ -16,19 +18,31 @@ struct nvkm_instmem_func { bool zero; }; +int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_instmem **); + void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem *); void nvkm_instmem_boot(struct nvkm_instmem *); +int nv04_instmem_suspend(struct nvkm_instmem *); +void nv04_instmem_resume(struct nvkm_instmem *); + +int r535_instmem_new(const struct nvkm_instmem_func *, + struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); + #include <core/memory.h> struct nvkm_instobj { struct nvkm_memory memory; struct list_head head; + bool preserve; u32 *suspend; }; void nvkm_instobj_ctor(const struct nvkm_memory_func *func, struct nvkm_instmem *, struct nvkm_instobj *); void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *); +int nvkm_instobj_save(struct nvkm_instobj *); +void nvkm_instobj_load(struct nvkm_instobj *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c new file mode 100644 index 0000000000..5f3c9c02a4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c @@ -0,0 +1,333 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <subdev/gsp.h> + +#include <nvhw/drf.h> + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> +#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h> +#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> +#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> + +struct fbsr_item { + const char *type; + u64 addr; + u64 size; + + struct list_head head; +}; + +struct fbsr { + struct list_head items; + + u64 size; + int regions; + + struct nvkm_gsp_client client; + struct nvkm_gsp_device device; + + u64 hmemory; + u64 sys_offset; +}; + +static int +fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, + u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + const u32 pages = size / GSP_PAGE_SIZE; + rpc_alloc_memory_v13_01 *rpc; + int ret; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, + sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0])); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + rpc->hClient = client->object.handle; + rpc->hDevice = device->object.handle; + rpc->hMemory = handle; + if (aper == NVKM_MEM_TARGET_HOST) { + rpc->hClass = NV01_MEMORY_LIST_SYSTEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, PCI) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + } else { + rpc->hClass = NV01_MEMORY_LIST_FBMEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ + } + rpc->pteAdjust = 0; + rpc->length = size; + rpc->pageCount = pages; + rpc->pteDesc.idr = 0; + rpc->pteDesc.reserved1 = 0; + rpc->pteDesc.length = pages; + + if (sgt) { + struct scatterlist *sgl; + int pte = 0, idx; + + for_each_sgtable_dma_sg(sgt, sgl, idx) { + for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++) + rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i; + + } + } else { + for (int i = 0; i < pages; i++) + rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; + } + + ret = nvkm_gsp_rpc_wr(gsp, rpc, true); + if (ret) + return ret; + + object->client = device->object.client; + object->parent = &device->object; + object->handle = handle; + return 0; +} + +static int +fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) +{ + NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, + item->addr, item->size, NULL, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) { + ret = PTR_ERR(ctrl); + goto done; + } + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hVidMem = fbsr->hmemory++; + ctrl->vidOffset = 0; + ctrl->sysOffset = fbsr->sys_offset; + ctrl->size = item->size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +done: + nvkm_gsp_rm_free(&memlist); + if (ret) + return ret; + + fbsr->sys_offset += item->size; + return 0; +} + +static int 
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) +{ + NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, + 0, fbsr->size, sgt, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->numRegions = fbsr->regions; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hSysMem = fbsr->hmemory++; + ctrl->gspFbAllocsSysOffset = items_size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + nvkm_gsp_rm_free(&memlist); + return 0; +} + +static bool +fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size) +{ + struct fbsr_item *item; + + if (!(item = kzalloc(sizeof(*item), GFP_KERNEL))) + return false; + + item->type = type; + item->addr = addr; + item->size = size; + list_add_tail(&item->head, &fbsr->items); + return true; +} + +static bool +fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) +{ + return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); +} + +static void +r535_instmem_resume(struct nvkm_instmem *imem) +{ + /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */ + if (imem->rm.fbsr_valid) { + nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); + imem->rm.fbsr_valid = false; + } +} + +static int +r535_instmem_suspend(struct nvkm_instmem *imem) +{ + struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_instobj *iobj; + struct fbsr fbsr = {}; + struct fbsr_item *item, *temp; + u64 items_size; + int ret; + + INIT_LIST_HEAD(&fbsr.items); + fbsr.hmemory = 0xcaf00003; + + /* Create a list of all regions we need RM to save during suspend. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) + return -ENOMEM; + } + } + + list_for_each_entry(iobj, &imem->boot, head) { + if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) + return -ENOMEM; + } + + if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) + return -ENOMEM; + + /* Determine memory requirements. */ + list_for_each_entry(item, &fbsr.items, head) { + nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n", + item->addr, item->size, item->type); + fbsr.size += item->size; + fbsr.regions++; + } + + items_size = fbsr.size; + nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size); + + fbsr.size += gsp->fb.rsvd_size; + fbsr.size += gsp->fb.bios.vga_workspace.size; + nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); + + ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); + if (ret) + goto done; + + /* Tell RM about the sysmem which will hold VRAM contents across suspend. */ + ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device); + if (ret) + goto done_sgt; + + ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); + if (WARN_ON(ret)) + goto done_sgt; + + /* Send VRAM regions that need saving. 
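Each one is wrapped in a memory-list object and copied by RM into the sysmem buffer at the running sys_offset.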
*/ + list_for_each_entry(item, &fbsr.items, head) { + ret = fbsr_send(&fbsr, item); + if (WARN_ON(ret)) + goto done_sgt; + } + + imem->rm.fbsr_valid = true; + + /* Cleanup everything except the sysmem backup, which will be removed after resume. */ +done_sgt: + if (ret) /* ... unless we failed already. */ + nvkm_gsp_sg_free(device, &imem->rm.fbsr); +done: + list_for_each_entry_safe(item, temp, &fbsr.items, head) { + list_del(&item->head); + kfree(item); + } + + nvkm_gsp_device_dtor(&fbsr.device); + nvkm_gsp_client_dtor(&fbsr.client); + return ret; +} + +static void * +r535_instmem_dtor(struct nvkm_instmem *imem) +{ + kfree(imem->func); + return imem; +} + +int +r535_instmem_new(const struct nvkm_instmem_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pinstmem) +{ + struct nvkm_instmem_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_instmem_dtor; + rm->fini = hw->fini; + rm->suspend = r535_instmem_suspend; + rm->resume = r535_instmem_resume; + rm->memory_new = hw->memory_new; + rm->memory_wrap = hw->memory_wrap; + rm->zero = false; + + ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c index 159d9f8c95..951f01e303 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static void ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4]) { @@ -53,5 +55,8 @@ int ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c index 265a05fd5f..053302ecb0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil) { @@ -49,5 +51,8 @@ int gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c index 5d28d30d09..65e9f04972 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static void ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask) { @@ -72,5 +74,8 @@ ga100_mc = { int ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c index eb2ab03f43..05d2fa95e0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + const struct nvkm_intr_data 
gp100_mc_intrs[] = { { NVKM_ENGINE_DISP , 0, 0, 0x04000000, true }, @@ -98,5 +100,8 @@ gp100_mc = { int gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index a602b0cb5b..7ba35ea59c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -16,6 +16,8 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o nvkm-y += nvkm/subdev/mmu/gv100.o nvkm-y += nvkm/subdev/mmu/tu102.o +nvkm-y += nvkm/subdev/mmu/r535.o + nvkm-y += nvkm/subdev/mmu/mem.o nvkm-y += nvkm/subdev/mmu/memnv04.o nvkm-y += nvkm/subdev/mmu/memnv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index ad3b44a9e0..b67ace7ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -403,6 +403,10 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev) nvkm_mmu_ptc_fini(mmu); mutex_destroy(&mmu->mutex); + + if (mmu->func->dtor) + mmu->func->dtor(mmu); + return mmu; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 5265bf4d83..e9ca653777 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -4,12 +4,16 @@ #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev) #include <subdev/mmu.h> +int r535_mmu_new(const struct nvkm_mmu_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mmu **); + void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mmu *); int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mmu **); struct nvkm_mmu_func { + void (*dtor)(struct nvkm_mmu *); void (*init)(struct nvkm_mmu *); u8 dma_bits; @@ -37,6 +41,8 @@ struct nvkm_mmu_func { const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); bool kind_sys; + + int (*promote_vmm)(struct nvkm_vmm *); }; extern const struct nvkm_mmu_func nv04_mmu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c new file mode 100644 index 0000000000..d3e95453f2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c @@ -0,0 +1,123 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "vmm.h" + +#include <nvrm/nvtypes.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h> +#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> + +static int +r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +{ + NV_VASPACE_ALLOCATION_PARAMETERS *args; + int ret; + + ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp, + &vmm->rm.client, &vmm->rm.device); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, + sizeof(*args), &vmm->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + + ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); + if (ret) + return ret; + + { + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, + &vmm->rm.rsvd); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, + NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->pageSize = 0x20000000; + ctrl->virtAddrLo = vmm->rm.rsvd->addr; + ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; + ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2; + ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; + ctrl->levels[0].size = 0x20; + ctrl->levels[0].aperture = 1; + ctrl->levels[0].pageShift = 0x2f; + ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; + ctrl->levels[1].size = 0x1000; + ctrl->levels[1].aperture = 1; + ctrl->levels[1].pageShift = 0x26; + if (vmm->pd->pde[0]->pde[0]) { + ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; + ctrl->levels[2].size = 0x1000; + ctrl->levels[2].aperture = 1; + ctrl->levels[2].pageShift = 0x1d; + } + + ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); + } + + return ret; +} + +static void +r535_mmu_dtor(struct nvkm_mmu *mmu) +{ + kfree(mmu->func); +} + +int +r535_mmu_new(const struct nvkm_mmu_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) +{ + struct nvkm_mmu_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_mmu_dtor; + rm->dma_bits = hw->dma_bits; + rm->mmu = hw->mmu; + rm->mem = hw->mem; + rm->vmm = hw->vmm; + rm->kind = hw->kind; + rm->kind_sys = hw->kind_sys; + rm->promote_vmm = r535_mmu_promote_vmm; + + ret = nvkm_mmu_new_(rm, device, type, inst, pmmu); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index 8d060ce47f..df662ce4a4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -24,6 +24,7 @@ #include "vmm.h" #include <core/option.h> +#include <subdev/gsp.h> #include <nvif/class.h> @@ -54,5 +55,8 @@ int tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu) { + if (nvkm_gsp_rm(device->gsp)) + return r535_mmu_new(&tu102_mmu, device, type, inst, pmmu); + return nvkm_mmu_new_(&tu102_mmu, 
device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index 8e459d88ff..cf490ff2b9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -572,6 +572,12 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, } uvmm->vmm->managed.raw = raw; + if (mmu->func->promote_vmm) { + ret = mmu->func->promote_vmm(uvmm->vmm); + if (ret) + return ret; + } + page = uvmm->vmm->func->page; args->v0.page_nr = 0; while (page && (page++)->shift) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index eb5fcadcb3..9c97800fe0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -1030,6 +1030,13 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm) struct nvkm_vma *vma; struct rb_node *node; + if (vmm->rm.client.gsp) { + nvkm_gsp_rm_free(&vmm->rm.object); + nvkm_gsp_device_dtor(&vmm->rm.device); + nvkm_gsp_client_dtor(&vmm->rm.client); + nvkm_vmm_put(vmm, &vmm->rm.rsvd); + } + if (0) nvkm_vmm_dump(vmm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index f3630d0e0d..bddac77f48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -558,7 +558,7 @@ gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr) void gp100_vmm_flush(struct nvkm_vmm *vmm, int depth) { - u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; + u32 type = 0; if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) type |= 0x00000004; /* HUB_ONLY */ type |= 0x00000001; /* PAGE_ALL */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c index 5a08458fe1..8379e72d77 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c @@ -27,7 +27,7 @@ static void tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) { struct nvkm_device *device = vmm->mmu->subdev.device; - u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; + u32 type = 0; type |= 0x00000001; /* PAGE_ALL */ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) @@ -35,9 +35,11 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) mutex_lock(&vmm->mmu->mutex); - nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); + if (!vmm->rm.bar2_pdb) + nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); + else + nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8); nvkm_wr32(device, 0xb830a4, 0x00000000); - nvkm_wr32(device, 0x100e68, 0x00000000); nvkm_wr32(device, 0xb830b0, 0x80000000 | type); nvkm_msec(device, 2000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index cd31483609..da5b2b2190 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include <subdev/gsp.h> + static const struct nvkm_falcon_func gp102_pmu_flcn = { .disable = gm200_flcn_disable, @@ -54,5 +56,8 @@ int gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c index 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
index b4eaf6db36..b4530073bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_subdev_func
 gm200_privring = {
 	.intr = gk104_privring_intr,
@@ -32,5 +34,8 @@ int
 gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		   struct nvkm_subdev **pprivring)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return -ENODEV;
+
 	return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
index 44f021392b..5392833d36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gp100_temp_get(struct nvkm_therm *therm)
 {
@@ -52,5 +54,8 @@ int
 gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_therm **ptherm)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return -ENODEV;
+
 	return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
index 84790cf52b..129eabb8b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 ga100_top_parse(struct nvkm_top *top)
 {
@@ -76,7 +78,7 @@ ga100_top_parse(struct nvkm_top *top)
 		case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
 		case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
 		case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-		case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+		case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
 		case 0x00000016: O_(NVKM_ENGINE_OFA   ,    0); break;
 		case 0x00000017: O_(NVKM_SUBDEV_FLA   ,    0); break;
 			break;
@@ -104,5 +106,8 @@ int
 ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	      struct nvkm_top **ptop)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return -ENODEV;
+
 	return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 2bbba8244c..da55dac8c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gk104_top_parse(struct nvkm_top *top)
 {
@@ -89,7 +91,7 @@ gk104_top_parse(struct nvkm_top *top)
 		case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
 		case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
 		case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-		case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+		case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
 		default:
 			break;
 		}
@@ -115,5 +117,8 @@ int
 gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	      struct nvkm_top **ptop)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return -ENODEV;
+
 	return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
 }
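The NVJPG change in both top parsers swaps O_() for I_(): the engine is now registered at the instance number parsed from the device-info table rather than always instance 0, so parts with multiple NVJPG engines enumerate each one. A toy model of the effect, with stand-in types and names rather than nouveau's macros:

/*
 * Toy model of the top-parser change: per-entry ("I_"-style) instance
 * registration versus a fixed ("O_"-style) instance 0. Illustrative
 * code only; the real I_()/O_() macros live in nvkm/subdev/top.
 */
#include <stdio.h>

struct devinfo { int type; int inst; };

static void
register_engine(const char *name, int inst)
{
	printf("%s%d\n", name, inst);
}

static void
parse(const struct devinfo *e)
{
	switch (e->type) {
	case 0x00000014:
		register_engine("gsp", 0);		/* fixed instance */
		break;
	case 0x00000015:
		register_engine("nvjpg", e->inst);	/* per-entry instance */
		break;
	default:
		break;
	}
}

int
main(void)
{
	const struct devinfo table[] = { { 0x15, 0 }, { 0x15, 1 }, { 0x14, 0 } };

	for (int i = 0; i < 3; i++)
		parse(&table[i]);	/* prints nvjpg0, nvjpg1, gsp0 */
	return 0;
}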
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
index 23cd21b40a..23a8546061 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/vfn/uvfn.o
 nvkm-y += nvkm/subdev/vfn/gv100.o
 nvkm-y += nvkm/subdev/vfn/tu102.o
 nvkm-y += nvkm/subdev/vfn/ga100.o
+
+nvkm-y += nvkm/subdev/vfn/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
index fd5c693132..bb0bb6fda5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_intr_data
@@ -43,5 +45,8 @@ int
 ga100_vfn_new(struct nvkm_device *device,
 	      enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_vfn_new(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
+
 	return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
index 96d53c0204..3a09781ad0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
@@ -5,16 +5,21 @@
 #include <subdev/vfn.h>
 
 struct nvkm_vfn_func {
+	void (*dtor)(struct nvkm_vfn *);
+
 	const struct nvkm_intr_func *intr;
 	const struct nvkm_intr_data *intrs;
 
 	struct {
 		u32 addr;
 		u32 size;
-		const struct nvkm_sclass base;
+		struct nvkm_sclass base;
 	} user;
 };
 
+int r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+		 u32 addr, struct nvkm_vfn **);
+
 int nvkm_vfn_new_(const struct nvkm_vfn_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  u32 addr, struct nvkm_vfn **);
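Dropping const from the nvkm_sclass member above is what allows the r535 wrapper that follows to copy hw->user wholesale with "rm->user = hw->user;": C forbids assignment to a structure that contains a const-qualified member. A minimal reproduction with illustrative types, not nouveau's:

/*
 * Why the 'const' had to go: a struct with a const member is not a
 * modifiable lvalue, so whole-struct assignment is rejected by the
 * compiler. Types and values below are stand-ins for illustration.
 */
struct sclass { int oclass; };

struct vfn_func_old {
	struct {
		unsigned addr, size;
		const struct sclass base;	/* "user = ..." is ill-formed */
	} user;
};

struct vfn_func_new {
	struct {
		unsigned addr, size;
		struct sclass base;		/* plain member: copyable */
	} user;
};

int
main(void)
{
	struct vfn_func_new hw = { .user = { 0xb80000, 0x1000, { 0xc361 } } };
	struct vfn_func_new rm;

	rm.user = hw.user;	/* fine now; with the const member gcc rejects it */
	return rm.user.base.oclass != 0xc361;
}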
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
new file mode 100644
index 0000000000..dce337306c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+r535_vfn_dtor(struct nvkm_vfn *vfn)
+{
+	kfree(vfn->func);
+}
+
+int
+r535_vfn_new(const struct nvkm_vfn_func *hw,
+	     struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
+	     struct nvkm_vfn **pvfn)
+{
+	struct nvkm_vfn_func *rm;
+	int ret;
+
+	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	rm->dtor = r535_vfn_dtor;
+	rm->intr = hw->intr;
+	rm->user = hw->user;
+
+	ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
+	if (ret)
+		kfree(rm);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
index 3d063fb5e1..a3bf13c5c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static void
@@ -104,5 +106,8 @@ int
 tu102_vfn_new(struct nvkm_device *device,
 	      enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+	if (nvkm_gsp_rm(device->gsp))
+		return r535_vfn_new(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
+
 	return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
 }