From dc50eab76b709d68175a358d6e23a5a3890764d3 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 19:39:57 +0200 Subject: Merging upstream version 6.7.7. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c | 10 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 185 ++ drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 8 + drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 8 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 2 + .../gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c | 15 + drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c | 51 + .../gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c | 16 +- drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c | 8 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c | 87 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 8 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 57 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 74 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 359 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 74 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 148 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 57 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2355 ++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 198 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 57 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 49 +- .../gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 20 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 45 + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 14 + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c | 333 +++ drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 6 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c | 123 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 6 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 8 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c | 5 + .../gpu/drm/nouveau/nvkm/subdev/privring/gm200.c | 5 + 
drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c | 7 +- drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 7 +- drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h | 7 +- drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c | 50 + drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c | 5 + 68 files changed, 4578 insertions(+), 82 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c index 45dcf493e9..c7d38609bb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include "priv.h" +#include <subdev/gsp.h> #include @@ -322,5 +323,8 @@ int ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c index c22d551c00..565e9a070b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -201,5 +201,8 @@ int tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild index 8faee3317a..9754c68725 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild @@ -7,3 +7,5 @@ nvkm-y += nvkm/subdev/bar/gk20a.o nvkm-y += nvkm/subdev/bar/gm107.o nvkm-y += nvkm/subdev/bar/gm20b.o nvkm-y += nvkm/subdev/bar/tu102.o + +nvkm-y += nvkm/subdev/bar/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c index d017a1b5e5..91bc53be97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c @@ -93,8 +93,16 @@ static int nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_bar *bar = nvkm_bar(subdev); + + if (!subdev->use.enabled) + return 0; + if (bar->func->bar1.fini) bar->func->bar1.fini(bar); + + if (!suspend) /* Handled by instmem.
*/ + nvkm_bar_bar2_fini(subdev->device); + return 0; } @@ -120,7 +128,7 @@ static void * nvkm_bar_dtor(struct nvkm_subdev *subdev) { struct nvkm_bar *bar = nvkm_bar(subdev); - nvkm_bar_bar2_fini(subdev->device); + return bar->func->dtor(bar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h index daebfc991c..d0168e0b78 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h @@ -4,6 +4,9 @@ #define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev) #include +int r535_bar_new_(const struct nvkm_bar_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); + void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bar *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c new file mode 100644 index 0000000000..3a30bea30e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -0,0 +1,185 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static void +r535_bar_flush(struct nvkm_bar *bar) +{ + ioread32_native(bar->flushBAR2); +} + +static void +r535_bar_bar2_wait(struct nvkm_bar *base) +{ +} + +static int +r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) +{ + rpc_update_bar_pde_v15_00 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; + rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ + rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! 
+ + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +static void +r535_bar_bar2_fini(struct nvkm_bar *bar) +{ + struct nvkm_gsp *gsp = bar->subdev.device->gsp; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + nvkm_done(bar->flushFBZero); + + WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); +} + +static void +r535_bar_bar2_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; + struct nvkm_gsp *gsp = device->gsp; + + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); + vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; + + if (!bar->flushFBZero) { + struct nvkm_memory *fbZero; + int ret; + + ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero); + if (ret == 0) { + ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero); + nvkm_memory_unref(&fbZero); + } + WARN_ON(ret); + } + + bar->bar2 = true; + bar->flushBAR2 = nvkm_kmap(bar->flushFBZero); + WARN_ON(!bar->flushBAR2); +} + +static void +r535_bar_bar1_wait(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_fini(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm; + struct nvkm_memory *pd3; + int ret; + + ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3); + if (WARN_ON(ret)) + return; + + nvkm_memory_unref(&vmm->pd->pt[0]->memory); + + ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory); + nvkm_memory_unref(&pd3); + if (WARN_ON(ret)) + return; + + vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory); +} + +static void * +r535_bar_dtor(struct nvkm_bar *bar) +{ + void *data = gf100_bar_dtor(bar); + + nvkm_memory_unref(&bar->flushFBZero); + + if (bar->flushBAR2PhysMode) + iounmap(bar->flushBAR2PhysMode); + + kfree(bar->func); + return data; +} + +int +r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) +{ + struct nvkm_bar_func *rm; + struct nvkm_bar *bar; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_bar_dtor; + rm->oneinit = hw->oneinit; + rm->bar1.init = r535_bar_bar1_init; + rm->bar1.fini = r535_bar_bar1_fini; + rm->bar1.wait = r535_bar_bar1_wait; + rm->bar1.vmm = hw->bar1.vmm; + rm->bar2.init = r535_bar_bar2_init; + rm->bar2.fini = r535_bar_bar2_fini; + rm->bar2.wait = r535_bar_bar2_wait; + rm->bar2.vmm = hw->bar2.vmm; + rm->flush = r535_bar_flush; + + ret = gf100_bar_new_(rm, device, type, inst, &bar); + if (ret) { + kfree(rm); + return ret; + } + *pbar = bar; + + bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); + if (!bar->flushBAR2PhysMode) + return -ENOMEM; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + + gf100_bar(*pbar)->bar2_halve = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c index c25ab407b8..b4196edad5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c @@ -22,6 +22,7 @@ #include "gf100.h" #include +#include #include static void @@ -95,5 +96,8 @@ int tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) { + if (nvkm_gsp_rm(device->gsp)) + return r535_bar_new_(&tu102_bar, device, type, inst, pbar); + return gf100_bar_new_(&tu102_bar, device, type, inst, pbar); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index 6c318e41bd..91f486ee4c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) return true; } +void * +nvbios_pointer(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 0))) + return &bios->data[addr]; + return NULL; +} + u8 nvbios_rd08(struct nvkm_bios *bios, u32 addr) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 19188683c8..8c2bf1c16f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name) return (void *)fw; } +static void +shadow_fw_release(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", .init = shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_release, .read = shadow_fw_read, .rw = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c index 80b5aaceea..8e1e0b057a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c @@ -24,6 +24,8 @@ */ #include "priv.h" +#include + static void gf100_bus_intr(struct nvkm_bus *bus) { @@ -72,5 +74,8 @@ int gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild index d1abb64841..5f97bffca9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild @@ -16,3 +16,5 @@ nvkm-y += nvkm/subdev/devinit/gm200.o nvkm-y += nvkm/subdev/devinit/gv100.o nvkm-y += nvkm/subdev/devinit/tu102.o nvkm-y += nvkm/subdev/devinit/ga100.o + +nvkm-y += nvkm/subdev/devinit/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c index 6b280b05c4..5f0b12a1fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c @@ -24,6 +24,7 @@ #include #include #include +#include static int ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) @@ -62,8 +63,19 @@ ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) return ret; } +static void +ga100_devinit_disable(struct nvkm_devinit *init) +{ + struct nvkm_device *device = init->subdev.device; + u32 r820c04 = nvkm_rd32(device, 0x820c04); + + if (r820c04 & 0x00000001) + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); +} + static const struct nvkm_devinit_func ga100_devinit = { + .disable = ga100_devinit_disable, .init = nv50_devinit_init, .post = tu102_devinit_post, .pll_set = ga100_devinit_pll_set, @@ -73,5 +85,8 @@ int ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { + if (nvkm_gsp_rm(device->gsp)) + return r535_devinit_new(&ga100_devinit, device, type, inst, pinit); + return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h index a648482d06..06bbfdcc78 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h @@ -4,6 +4,9 @@ #define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev) #include +int r535_devinit_new(const struct nvkm_devinit_func *, + struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); + struct nvkm_devinit_func { void *(*dtor)(struct nvkm_devinit *); void (*preinit)(struct nvkm_devinit *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c new file mode 100644 index 0000000000..666eb93b17 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c @@ -0,0 +1,51 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "nv50.h" + +static void * +r535_devinit_dtor(struct nvkm_devinit *devinit) +{ + kfree(devinit->func); + return devinit; +} + +int +r535_devinit_new(const struct nvkm_devinit_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pdevinit) +{ + struct nvkm_devinit_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_devinit_dtor; + rm->post = hw->post; + rm->disable = hw->disable; + + ret = nv50_devinit_new_(rm, device, type, inst, pdevinit); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c index 81a1ad2c88..f406b1525a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c @@ -24,6 +24,7 @@ #include #include #include +#include static int tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) @@ -83,17 +84,9 @@ tu102_devinit_wait(struct nvkm_device *device) } int -tu102_devinit_post(struct nvkm_devinit *base, bool post) +tu102_devinit_post(struct nvkm_devinit *init, bool post) { - struct nv50_devinit *init = nv50_devinit(base); - int ret; - - ret = tu102_devinit_wait(init->base.subdev.device); - if (ret) - return ret; - - gm200_devinit_preos(init, post); - return 0; + return tu102_devinit_wait(init->subdev.device); } static const struct nvkm_devinit_func @@ -108,5 +101,8 @@ int tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { + if (nvkm_gsp_rm(device->gsp)) + return r535_devinit_new(&tu102_devinit, device, type, inst, pinit); + return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index 967efaddae..5390417a58 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -22,6 +22,7 @@ #include "priv.h" #include +#include #include #include #include @@ -175,7 +176,12 @@ int tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault); + int ret; + + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + + ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index 394c305e75..d1611ad3bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -36,6 +36,8 @@ nvkm-y += nvkm/subdev/fb/tu102.o nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o +nvkm-y += nvkm/subdev/fb/r535.o + nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o nvkm-y += nvkm/subdev/fb/ramnv10.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c index 12037fd4fd..e9e7c1d5c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c @@ -22,6 +22,8 @@ #include "gf100.h" #include "ram.h" +#include + static const struct nvkm_fb_func ga100_fb = { .dtor = gf100_fb_dtor, @@ -38,5 +40,8 @@ ga100_fb = { int ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if 
(nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&ga100_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index 76f6877b54..25f82b372b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -22,6 +22,7 @@ #include "gf100.h" #include "ram.h" +#include #include static u64 @@ -59,6 +60,9 @@ ga102_fb = { int ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&ga102_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga102_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 77d6a8c108..35c55dfba2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -6,6 +6,9 @@ #include struct nvkm_bios; +int r535_fb_new(const struct nvkm_fb_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); + struct nvkm_fb_func { void *(*dtor)(struct nvkm_fb *); u32 (*tags)(struct nvkm_fb *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c new file mode 100644 index 0000000000..d325150101 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c @@ -0,0 +1,87 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include "ram.h" + +#include + +static const struct nvkm_ram_func +r535_fb_ram = { +}; + +static int +r535_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) +{ + struct nvkm_gsp *gsp = fb->subdev.device->gsp; + struct nvkm_ram *ram; + int ret; + + if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL))) + return -ENOMEM; + + ram->func = &r535_fb_ram; + ram->fb = fb; + ram->type = NVKM_RAM_TYPE_UNKNOWN; /*TODO: pull this from GSP. 
*/ + ram->size = gsp->fb.size; + ram->stolen = false; + mutex_init(&ram->mutex); + + for (int i = 0; i < gsp->fb.region_nr; i++) { + ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, + gsp->fb.region[i].addr >> NVKM_RAM_MM_SHIFT, + gsp->fb.region[i].size >> NVKM_RAM_MM_SHIFT, + 1); + if (ret) + return ret; + } + + return 0; +} + +static void * +r535_fb_dtor(struct nvkm_fb *fb) +{ + kfree(fb->func); + return fb; +} + +int +r535_fb_new(const struct nvkm_fb_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + struct nvkm_fb_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_fb_dtor; + rm->sysmem.flush_page_init = hw->sysmem.flush_page_init; + rm->vidmem.size = hw->vidmem.size; + rm->ram_new = r535_fb_ram_new; + + ret = nvkm_fb_new_(rm, device, type, inst, pfb); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c index 5c34416cb6..c826980bf7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c @@ -88,12 +88,20 @@ nvkm_vram_dtor(struct nvkm_memory *memory) struct nvkm_vram *vram = nvkm_vram(memory); struct nvkm_mm_node *next = vram->mn; struct nvkm_mm_node *node; - mutex_lock(&vram->ram->mutex); - while ((node = next)) { - next = node->next; - nvkm_mm_free(&vram->ram->vram, &node); + + if (next) { + if (likely(next->nl_entry.next)) { + mutex_lock(&vram->ram->mutex); + while ((node = next)) { + next = node->next; + nvkm_mm_free(&vram->ram->vram, &node); + } + mutex_unlock(&vram->ram->mutex); + } else { + kfree(vram->mn); + } } - mutex_unlock(&vram->ram->mutex); + return vram; } @@ -108,6 +116,33 @@ nvkm_vram = { .kmap = nvkm_vram_kmap, }; +int +nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size, + struct nvkm_memory **pmemory) +{ + struct nvkm_ram *ram; + struct nvkm_vram *vram; + + if (!device->fb || !(ram = device->fb->ram)) + return -ENODEV; + + if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL))) + return -ENOMEM; + + nvkm_memory_ctor(&nvkm_vram, &vram->memory); + vram->ram = ram; + vram->page = NVKM_RAM_MM_SHIFT; + *pmemory = &vram->memory; + + vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL); + if (!vram->mn) + return -ENOMEM; + + vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT; + vram->mn->length = size >> NVKM_RAM_MM_SHIFT; + return 0; +} + int nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, bool contig, bool back, struct nvkm_memory **pmemory) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c index bcc23d4c81..f7d2a749ce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c @@ -22,6 +22,8 @@ #include "gf100.h" #include "ram.h" +#include <subdev/gsp.h> + bool tu102_fb_vpr_scrub_required(struct nvkm_fb *fb) { @@ -46,6 +48,9 @@ tu102_fb = { int tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { + if (nvkm_gsp_rm(device->gsp)) + return r535_fb_new(&tu102_fb, device, type, inst, pfb); + return gf100_fb_new_(&tu102_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c index 7dc99492f5..d621edbdff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c @@ -23,6 +23,8 @@ */
#include "priv.h" +#include + static u32 gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr) { @@ -39,5 +41,8 @@ int gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c index 4a96f926b6..4dbffae21d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + static void ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match) { @@ -115,5 +117,8 @@ int ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c index c0e4cdb455..5f7063d557 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + static void gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) { @@ -71,5 +73,8 @@ int gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 7f61a1ed15..16bf2f1bb7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -1,4 +1,12 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/subdev/gsp/base.o +nvkm-y += nvkm/subdev/gsp/fwsec.o + nvkm-y += nvkm/subdev/gsp/gv100.o +nvkm-y += nvkm/subdev/gsp/tu102.o +nvkm-y += nvkm/subdev/gsp/tu116.o +nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o +nvkm-y += nvkm/subdev/gsp/ad102.o + +nvkm-y += nvkm/subdev/gsp/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c new file mode 100644 index 0000000000..c849c6299c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -0,0 +1,57 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static const struct nvkm_gsp_func +ad102_gsp_r535_113_01 = { + .flcn = &ga102_gsp_flcn, + .fwsec = &ga102_gsp_fwsec, + + .sig_section = ".fwsignature_ad10x", + + .wpr_heap.os_carveout_size = 20 << 20, + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 84 << 20, + + .booter.ctor = ga102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = ga102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +ad102_gsps[] = { + { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, + {} +}; + +int +ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 591ac95c26..da1bebb896 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -20,15 +20,74 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include "priv.h" -#include -#include -#include -#include + +int +nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) +{ + for (int i = 0; i < gsp->intr_nr; i++) { + if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) + return gsp->intr[i].nonstall; + } + + return -ENOENT; +} + +int +nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) +{ + for (int i = 0; i < gsp->intr_nr; i++) { + if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) { + if (gsp->intr[i].stall != ~0) + return gsp->intr[i].stall; + + return -EINVAL; + } + } + + return -ENOENT; +} + +static int +nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->fini) + return 0; + + return gsp->func->fini(gsp, suspend); +} + +static int +nvkm_gsp_init(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->init) + return 0; + + return gsp->func->init(gsp); +} + +static int +nvkm_gsp_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (!gsp->func->oneinit) + return 0; + + return gsp->func->oneinit(gsp); +} static void * nvkm_gsp_dtor(struct nvkm_subdev *subdev) { struct nvkm_gsp *gsp = nvkm_gsp(subdev); + + if (gsp->func && gsp->func->dtor) + gsp->func->dtor(gsp); + nvkm_falcon_dtor(&gsp->falcon); return gsp; } @@ -36,6 +95,9 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev) static const struct nvkm_subdev_func nvkm_gsp = { .dtor = nvkm_gsp_dtor, + .oneinit = nvkm_gsp_oneinit, + .init = nvkm_gsp_init, + .fini = nvkm_gsp_fini, }; int @@ -54,6 +116,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, return PTR_ERR(fwif); gsp->func = fwif->func; + gsp->rm = gsp->func->rm; - return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon); + return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000, + &gsp->falcon); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c new file mode 100644 index 0000000000..330d72b1a4 --- /dev/null 
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c @@ -0,0 +1,359 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include +#include + +#include + +union nvfw_falcon_appif_hdr { + struct nvfw_falcon_appif_hdr_v1 { + u8 ver; + u8 hdr; + u8 len; + u8 cnt; + } v1; +}; + +union nvfw_falcon_appif { + struct nvfw_falcon_appif_v1 { +#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004 + u32 id; + u32 dmem_base; + } v1; +}; + +union nvfw_falcon_appif_dmemmapper { + struct { + u32 signature; + u16 version; + u16 size; + u32 cmd_in_buffer_offset; + u32 cmd_in_buffer_size; + u32 cmd_out_buffer_offset; + u32 cmd_out_buffer_size; + u32 nvf_img_data_buffer_offset; + u32 nvf_img_data_buffer_size; + u32 printf_buffer_hdr; + u32 ucode_build_time_stamp; + u32 ucode_signature; +#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015 +#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB 0x00000019 + u32 init_cmd; + u32 ucode_feature; + u32 ucode_cmd_mask0; + u32 ucode_cmd_mask1; + u32 multi_tgt_tbl; + } v3; +}; + +struct nvfw_fwsec_frts_cmd { + struct { + u32 ver; + u32 hdr; + u64 addr; + u32 size; + u32 flags; + } read_vbios; + struct { + u32 ver; + u32 hdr; + u32 addr; + u32 size; +#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002 + u32 type; + } frts_region; +}; + +static int +nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd) +{ + union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset); + const u8 *dmem = fw->fw.img + fw->dmem_base_img; + int i; + + if (WARN_ON(hdr->v1.ver != 1)) + return -EINVAL; + + for (i = 0; i < hdr->v1.cnt; i++) { + union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len); + union nvfw_falcon_appif_dmemmapper *dmemmap; + struct nvfw_fwsec_frts_cmd *frtscmd; + + if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER) + continue; + + dmemmap = (void *)(dmem + app->v1.dmem_base); + dmemmap->v3.init_cmd = init_cmd; + + frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset); + + frtscmd->read_vbios.ver = 1; + frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios); + frtscmd->read_vbios.addr = 0; + frtscmd->read_vbios.size = 0; + frtscmd->read_vbios.flags = 2; + + if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) { + frtscmd->frts_region.ver = 1; + frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region); + frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12; + frtscmd->frts_region.size = 
gsp->fb.wpr2.frts.size >> 12; + frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB; + } + + break; + } + + if (WARN_ON(i == hdr->v1.cnt)) + return -EINVAL; + + return 0; +} + +union nvfw_falcon_ucode_desc { + struct nvkm_falcon_ucode_desc_v2 { + u32 Hdr; + u32 StoredSize; + u32 UncompressedSize; + u32 VirtualEntry; + u32 InterfaceOffset; + u32 IMEMPhysBase; + u32 IMEMLoadSize; + u32 IMEMVirtBase; + u32 IMEMSecBase; + u32 IMEMSecSize; + u32 DMEMOffset; + u32 DMEMPhysBase; + u32 DMEMLoadSize; + u32 altIMEMLoadSize; + u32 altDMEMLoadSize; + } v2; + + struct nvkm_falcon_ucode_desc_v3 { + u32 Hdr; + u32 StoredSize; + u32 PKCDataOffset; + u32 InterfaceOffset; + u32 IMEMPhysBase; + u32 IMEMLoadSize; + u32 IMEMVirtBase; + u32 DMEMPhysBase; + u32 DMEMLoadSize; + u16 EngineIdMask; + u8 UcodeId; + u8 SignatureCount; + u16 SignatureVersions; + u16 Reserved; + } v3; +}; + +static int +nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name, + const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd, + struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + const struct firmware *bl; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *bld; + int ret; + + /* Build ucode. */ + ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true, + (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, + &gsp->falcon, fw); + if (WARN_ON(ret)) + return ret; + + fw->nmem_base_img = 0; + fw->nmem_base = desc->IMEMPhysBase; + fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize; + + fw->imem_base_img = 0; + fw->imem_base = desc->IMEMSecBase; + fw->imem_size = desc->IMEMSecSize; + + fw->dmem_base_img = desc->DMEMOffset; + fw->dmem_base = desc->DMEMPhysBase; + fw->dmem_size = desc->DMEMLoadSize; + + /* Bootloader. */ + ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, bl->data); + bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); + + fw->boot_addr = bld->start_tag << 8; + fw->boot_size = bld->code_size; + fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL); + if (!fw->boot) + ret = -ENOMEM; + + nvkm_firmware_put(bl); + + /* Patch in interface data. */ + return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); +} + +static int +nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name, + const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd, + struct nvkm_falcon_fw *fw) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_bios *bios = device->bios; + int ret; + + /* Build ucode. */ + ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true, + (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, + &gsp->falcon, fw); + if (WARN_ON(ret)) + return ret; + + fw->imem_base_img = 0; + fw->imem_base = desc->IMEMPhysBase; + fw->imem_size = desc->IMEMLoadSize; + fw->dmem_base_img = desc->IMEMLoadSize; + fw->dmem_base = desc->DMEMPhysBase; + fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256); + fw->dmem_sign = desc->PKCDataOffset; + fw->boot_addr = 0; + fw->fuse_ver = desc->SignatureVersions; + fw->ucode_id = desc->UcodeId; + fw->engine_id = desc->EngineIdMask; + + /* Patch in signature. */ + ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4, + nvbios_pointer(bios, 0), desc->SignatureCount, + (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0); + if (WARN_ON(ret)) + return ret; + + /* Patch in interface data. 
*/ + return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); +} + +static int +nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_bios *bios = device->bios; + const union nvfw_falcon_ucode_desc *desc; + struct nvbios_pmuE flcn_ucode; + u8 idx, ver, hdr; + u32 data; + u16 size, vers; + struct nvkm_falcon_fw fw = {}; + u32 mbox0 = 0; + int ret; + + /* Lookup in VBIOS. */ + for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) { + if (flcn_ucode.type == 0x85) + break; + } + + if (WARN_ON(!data)) + return -EINVAL; + + /* Determine version. */ + desc = nvbios_pointer(bios, flcn_ucode.data); + if (WARN_ON(!(desc->v2.Hdr & 0x00000001))) + return -EINVAL; + + size = (desc->v2.Hdr & 0xffff0000) >> 16; + vers = (desc->v2.Hdr & 0x0000ff00) >> 8; + + switch (vers) { + case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break; + case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break; + default: + nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers); + return -EINVAL; + } + + if (ret) { + nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret); + return ret; + } + + /* Boot. */ + ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0); + nvkm_falcon_fw_dtor(&fw); + if (ret) + return ret; + + return 0; +} + +int +nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + int ret; + u32 err; + + ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB); + if (ret) + return ret; + + /* Verify. */ + err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff; + if (err) { + nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err); + return -EIO; + } + + return 0; +} + +int +nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + int ret; + u32 err, wpr2_lo, wpr2_hi; + + ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS); + if (ret) + return ret; + + /* Verify. */ + err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16; + if (err) { + nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err); + return -EIO; + } + + wpr2_lo = nvkm_rd32(device, 0x1fa824); + wpr2_hi = nvkm_rd32(device, 0x1fa828); + nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c new file mode 100644 index 0000000000..223f68b532 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static const struct nvkm_falcon_func +ga100_gsp_flcn = { + .disable = gm200_flcn_disable, + .enable = gm200_flcn_enable, + .addr2 = 0x1000, + .riscv_irqmask = 0x2b4, + .reset_eng = gp102_flcn_reset_eng, + .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing, + .bind_inst = gm200_flcn_bind_inst, + .bind_stat = gm200_flcn_bind_stat, + .bind_intr = true, + .imem_pio = &gm200_flcn_imem_pio, + .dmem_pio = &gm200_flcn_dmem_pio, + .riscv_active = tu102_flcn_riscv_active, + .intr_retrigger = ga100_flcn_intr_retrigger, +}; + +static const struct nvkm_gsp_func +ga100_gsp_r535_113_01 = { + .flcn = &ga100_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_ga100", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +ga100_gsps[] = { + { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index a3996ceca9..4c4b4168a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -21,33 +21,165 @@ */ #include "priv.h" -static const struct nvkm_falcon_func +#include +#include +#include + +int +ga102_gsp_reset(struct nvkm_gsp *gsp) +{ + int ret; + + ret = gsp->falcon.func->reset_eng(&gsp->falcon); + if (ret) + return ret; + + nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111); + return 0; +} + +int +ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header_v2 *hshdr; + const struct nvfw_hs_load_header_v2 *lhdr; + u32 loc, sig, cnt, *meta; + int ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset); + meta = (u32 *)(blob->data + hshdr->meta_data_offset); + loc = *(u32 *)(blob->data + hshdr->patch_loc); + sig = *(u32 *)(blob->data + hshdr->patch_sig); + cnt = *(u32 *)(blob->data + hshdr->num_sig); + + ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true, + blob->data + hdr->data_offset, hdr->data_size, falcon, fw); + if (ret) + goto done; + + ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data, + cnt, hshdr->sig_prod_offset + sig, 0, 0); + if (ret) + goto done; + + lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset); + + fw->imem_base_img = 
lhdr->app[0].offset; + fw->imem_base = 0; + fw->imem_size = lhdr->app[0].size; + + fw->dmem_base_img = lhdr->os_data_offset; + fw->dmem_base = 0; + fw->dmem_size = lhdr->os_data_size; + fw->dmem_sign = loc - lhdr->os_data_offset; + + fw->boot_addr = lhdr->app[0].offset; + + fw->fuse_ver = meta[0]; + fw->engine_id = meta[1]; + fw->ucode_id = meta[2]; + +done: + if (ret) + nvkm_falcon_fw_dtor(fw); + + return ret; +} + +static int +ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src) +{ + struct nvkm_falcon *falcon = fw->falcon; + struct nvkm_device *device = falcon->owner->device; + u32 sig_fuse_version = fw->fuse_ver; + u32 reg_fuse_version; + int idx = 0; + + FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id); + FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version); + + if (fw->engine_id & 0x00000400) { + reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4); + } else { + WARN_ON(1); + return -ENOSYS; + } + + FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version); + reg_fuse_version = BIT(fls(reg_fuse_version)); + FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version); + if (!(reg_fuse_version & fw->fuse_ver)) + return -EINVAL; + + while (!(reg_fuse_version & sig_fuse_version & 1)) { + idx += (sig_fuse_version & 1); + reg_fuse_version >>= 1; + sig_fuse_version >>= 1; + } + + return idx; +} + +const struct nvkm_falcon_fw_func +ga102_gsp_fwsec = { + .signature = ga102_gsp_fwsec_signature, + .reset = gm200_flcn_fw_reset, + .load = ga102_flcn_fw_load, + .boot = ga102_flcn_fw_boot, +}; + +const struct nvkm_falcon_func ga102_gsp_flcn = { .disable = gm200_flcn_disable, .enable = gm200_flcn_enable, .select = ga102_flcn_select, .addr2 = 0x1000, + .riscv_irqmask = 0x528, .reset_eng = gp102_flcn_reset_eng, .reset_prep = ga102_flcn_reset_prep, .reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing, .imem_dma = &ga102_flcn_dma, .dmem_dma = &ga102_flcn_dma, + .riscv_active = ga102_flcn_riscv_active, + .intr_retrigger = ga100_flcn_intr_retrigger, }; static const struct nvkm_gsp_func -ga102_gsp = { +ga102_gsp_r535_113_01 = { .flcn = &ga102_gsp_flcn, + .fwsec = &ga102_gsp_fwsec, + + .sig_section = ".fwsignature_ga10x", + + .wpr_heap.os_carveout_size = 20 << 20, + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 84 << 20, + + .booter.ctor = ga102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = ga102_gsp_reset, + + .rm = &r535_gsp_rm, }; -static int -ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) -{ - return 0; -} +static const struct nvkm_gsp_func +ga102_gsp = { + .flcn = &ga102_gsp_flcn, +}; static struct nvkm_gsp_fwif ga102_gsps[] = { - { -1, ga102_gsp_nofw, &ga102_gsp }, + { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &ga102_gsp }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index da6a809cd3..62d9289bca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -34,12 +34,12 @@ gv100_gsp_flcn = { .dmem_pio = &gm200_flcn_dmem_pio, }; -static const struct nvkm_gsp_func +const struct nvkm_gsp_func gv100_gsp = { .flcn = &gv100_gsp_flcn, }; -static int +int gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 89749a4020..9f4a62375a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -4,16 +4,67 @@ #include enum nvkm_acr_lsf_id; -struct nvkm_gsp_func { - const struct nvkm_falcon_func *flcn; -}; +int nvkm_gsp_fwsec_frts(struct nvkm_gsp *); +int nvkm_gsp_fwsec_sb(struct nvkm_gsp *); struct nvkm_gsp_fwif { int version; int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); const struct nvkm_gsp_func *func; + const char *ver; + bool enable; }; +int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); +int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + +struct nvkm_gsp_func { + const struct nvkm_falcon_func *flcn; + const struct nvkm_falcon_fw_func *fwsec; + + char *sig_section; + + struct { + u32 os_carveout_size; + u32 base_size; + u64 min_size; + } wpr_heap; + + struct { + int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); + } booter; + + void (*dtor)(struct nvkm_gsp *); + int (*oneinit)(struct nvkm_gsp *); + int (*init)(struct nvkm_gsp *); + int (*fini)(struct nvkm_gsp *, bool suspend); + int (*reset)(struct nvkm_gsp *); + + const struct nvkm_gsp_rm *rm; +}; + +extern const struct nvkm_falcon_func tu102_gsp_flcn; +extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec; +int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); +int tu102_gsp_oneinit(struct nvkm_gsp *); +int tu102_gsp_reset(struct nvkm_gsp *); + +extern const struct nvkm_falcon_func ga102_gsp_flcn; +extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec; +int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, + struct nvkm_falcon *, struct nvkm_falcon_fw *); +int ga102_gsp_reset(struct nvkm_gsp *); + +void r535_gsp_dtor(struct nvkm_gsp *); +int r535_gsp_oneinit(struct nvkm_gsp *); +int r535_gsp_init(struct nvkm_gsp *); +int r535_gsp_fini(struct nvkm_gsp *, bool suspend); +extern const struct nvkm_gsp_rm r535_gsp_rm; + int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); + +extern const struct nvkm_gsp_func gv100_gsp; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c new file mode 100644 index 0000000000..a41735ab60 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -0,0 +1,2355 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE +#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16 + +struct r535_gsp_msg { + u8 auth_tag_buffer[16]; + u8 aad_buffer[16]; + u32 checksum; + u32 sequence; + u32 elem_count; + u32 pad; + u8 data[]; +}; + +#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) + +static int +r535_rpc_status_to_errno(uint32_t rpc_status) +{ + switch (rpc_status) { + case 0x55: /* NV_ERR_NOT_READY */ + case 0x66: /* NV_ERR_TIMEOUT_RETRY */ + return -EAGAIN; + case 0x51: /* NV_ERR_NO_MEMORY */ + return -ENOMEM; + default: + return -EINVAL; + } +} + +static void * +r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime) +{ + struct r535_gsp_msg *mqe; + u32 size, rptr = *gsp->msgq.rptr; + int used; + u8 *msg; + u32 len; + + size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE); + if (WARN_ON(!size || size >= gsp->msgq.cnt)) + return ERR_PTR(-EINVAL); + + do { + u32 wptr = *gsp->msgq.wptr; + + used = wptr + gsp->msgq.cnt - rptr; + if (used >= gsp->msgq.cnt) + used -= gsp->msgq.cnt; + if (used >= size) + break; + + usleep_range(1, 2); + } while (--(*ptime)); + + if (WARN_ON(!*ptime)) + return ERR_PTR(-ETIMEDOUT); + + mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000); + + if (prepc) { + *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe); + return mqe->data; + } + + msg = kvmalloc(repc, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); + len = min_t(u32, repc, len); + memcpy(msg, mqe->data, len); + + rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE); + if (rptr == gsp->msgq.cnt) + rptr = 0; + + repc -= len; + + if (repc) { + mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); + memcpy(msg + len, mqe, repc); + + rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE); + } + + mb(); + (*gsp->msgq.rptr) = rptr; + return msg; +} + +static void * +r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime) +{ + return r535_gsp_msgq_wait(gsp, repc, NULL, ptime); +} + +static int +r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv) +{ + struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data); + struct r535_gsp_msg *cqe; + u32 argc = cmd->checksum; + u64 *ptr = (void *)cmd; + u64 *end; + u64 csum = 0; + int free, time = 1000000; + u32 wptr, size; + u32 off = 0; + + argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE); + + end = (u64 *)((char *)ptr + argc); + cmd->pad = 0; + cmd->checksum = 0; + cmd->sequence = gsp->cmdq.seq++; + cmd->elem_count = DIV_ROUND_UP(argc, 0x1000); + + while (ptr < end) + csum ^= *ptr++; + + cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); + + wptr = *gsp->cmdq.wptr; + do { + do { + free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; + if (free >= gsp->cmdq.cnt) + free -= gsp->cmdq.cnt; + if (free >= 1) + break; + + usleep_range(1, 2); + } while(--time); + + if (WARN_ON(!time)) { + kvfree(cmd); + return -ETIMEDOUT; + } + + cqe = (void *)((u8 
*)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); + size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE); + memcpy(cqe, (u8 *)cmd + off, size); + + wptr += DIV_ROUND_UP(size, 0x1000); + if (wptr == gsp->cmdq.cnt) + wptr = 0; + + off += size; + argc -= size; + } while(argc); + + nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); + wmb(); + (*gsp->cmdq.wptr) = wptr; + mb(); + + nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); + + kvfree(cmd); + return 0; +} + +static void * +r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc) +{ + struct r535_gsp_msg *cmd; + u32 size = GSP_MSG_HDR_SIZE + argc; + + size = ALIGN(size, GSP_MSG_MIN_SIZE); + cmd = kvzalloc(size, GFP_KERNEL); + if (!cmd) + return ERR_PTR(-ENOMEM); + + cmd->checksum = argc; + return cmd->data; +} + +struct nvfw_gsp_rpc { + u32 header_version; + u32 signature; + u32 length; + u32 function; + u32 rpc_result; + u32 rpc_result_private; + u32 sequence; + union { + u32 spare; + u32 cpuRmGfid; + }; + u8 data[]; +}; + +static void +r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) +{ + kvfree(msg); +} + +static void +r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) +{ + if (gsp->subdev.debug >= lvl) { + nvkm_printk__(&gsp->subdev, lvl, info, + "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", + msg->function, msg->length, msg->length - sizeof(*msg), + msg->rpc_result, msg->rpc_result_private); + print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, + msg->data, msg->length - sizeof(*msg), true); + } +} + +static struct nvfw_gsp_rpc * +r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvfw_gsp_rpc *msg; + int time = 4000000, i; + u32 size; + +retry: + msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time); + if (IS_ERR_OR_NULL(msg)) + return msg; + + msg = r535_gsp_msgq_recv(gsp, msg->length, &time); + if (IS_ERR_OR_NULL(msg)) + return msg; + + if (msg->rpc_result) { + r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, msg); + return ERR_PTR(-EINVAL); + } + + r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE); + + if (fn && msg->function == fn) { + if (repc) { + if (msg->length < sizeof(*msg) + repc) { + nvkm_error(subdev, "msg len %d < %zd\n", + msg->length, sizeof(*msg) + repc); + r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, msg); + return ERR_PTR(-EIO); + } + + return msg; + } + + r535_gsp_msg_done(gsp, msg); + return NULL; + } + + for (i = 0; i < gsp->msgq.ntfy_nr; i++) { + struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; + + if (ntfy->fn == msg->function) { + if (ntfy->func) + ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); + break; + } + } + + if (i == gsp->msgq.ntfy_nr) + r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN); + + r535_gsp_msg_done(gsp, msg); + if (fn) + goto retry; + + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + goto retry; + + return NULL; +} + +static int +r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) +{ + int ret = 0; + + mutex_lock(&gsp->msgq.mutex); + if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { + ret = -ENOSPC; + } else { + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; + gsp->msgq.ntfy_nr++; + } + mutex_unlock(&gsp->msgq.mutex); + return ret; +} + +static int +r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) +{ + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + repv = 
r535_gsp_msg_recv(gsp, fn, 0); + mutex_unlock(&gsp->cmdq.mutex); + if (IS_ERR(repv)) + return PTR_ERR(repv); + + return 0; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +{ + struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); + struct nvfw_gsp_rpc *msg; + u32 fn = rpc->function; + void *repv = NULL; + int ret; + + if (gsp->subdev.debug >= NV_DBG_TRACE) { + nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, + rpc->length, rpc->length - sizeof(*rpc)); + print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, + rpc->data, rpc->length - sizeof(*rpc), true); + } + + ret = r535_gsp_cmdq_push(gsp, rpc); + if (ret) + return ERR_PTR(ret); + + if (wait) { + msg = r535_gsp_msg_recv(gsp, fn, repc); + if (!IS_ERR_OR_NULL(msg)) + repv = msg->data; + else + repv = msg; + } + + return repv; +} + +static void +r535_gsp_event_dtor(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + + mutex_lock(&gsp->client_id.mutex); + if (event->func) { + list_del(&event->head); + event->func = NULL; + } + mutex_unlock(&gsp->client_id.mutex); + + nvkm_gsp_rm_free(&event->object); + event->device = NULL; +} + +static int +r535_gsp_device_event_get(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->event = event->id; + ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); +} + +static int +r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, + nvkm_gsp_event_func func, struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + NV0005_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, + NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), + &event->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hParentClient = client->object.handle; + args->hSrcResource = 0; + args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + args->notifyIndex = NV01_EVENT_CLIENT_RM | id; + args->data = NULL; + + ret = nvkm_gsp_rm_alloc_wr(&event->object, args); + if (ret) + return ret; + + event->device = device; + event->id = id; + + ret = r535_gsp_device_event_get(event); + if (ret) { + nvkm_gsp_event_dtor(event); + return ret; + } + + mutex_lock(&gsp->client_id.mutex); + event->func = func; + list_add(&event->head, &client->events); + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +static void +r535_gsp_device_dtor(struct nvkm_gsp_device *device) +{ + nvkm_gsp_rm_free(&device->subdevice); + nvkm_gsp_rm_free(&device->object); +} + +static int +r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) +{ + NV2080_ALLOC_PARAMETERS *args; + + return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), + &device->subdevice); +} + +static int +r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) +{ + NV0080_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), + &device->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClientShare = 
client->object.handle; + + ret = nvkm_gsp_rm_alloc_wr(&device->object, args); + if (ret) + return ret; + + ret = r535_gsp_subdevice_ctor(device); + if (ret) + nvkm_gsp_rm_free(&device->object); + + return ret; +} + +static void +r535_gsp_client_dtor(struct nvkm_gsp_client *client) +{ + struct nvkm_gsp *gsp = client->gsp; + + nvkm_gsp_rm_free(&client->object); + + mutex_lock(&gsp->client_id.mutex); + idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); + mutex_unlock(&gsp->client_id.mutex); + + client->gsp = NULL; +} + +static int +r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +{ + NV0000_ALLOC_PARAMETERS *args; + int ret; + + mutex_lock(&gsp->client_id.mutex); + ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); + mutex_unlock(&gsp->client_id.mutex); + if (ret < 0) + return ret; + + client->gsp = gsp; + client->object.client = client; + INIT_LIST_HEAD(&client->events); + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) { + r535_gsp_client_dtor(client); + return ret; + } + + args->hClient = client->object.handle; + args->processID = ~0; + + ret = nvkm_gsp_rm_alloc_wr(&client->object, args); + if (ret) { + r535_gsp_client_dtor(client); + return ret; + } + + return 0; +} + +static int +r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_free_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", + client->object.handle, object->handle); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->params.hRoot = client->object.handle; + rpc->params.hObjectParent = 0; + rpc->params.hObjectOld = object->handle; + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +static void +r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params); + + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static void * +r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params); + struct nvkm_gsp *gsp = object->client->gsp; + void *ret; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + if (rpc->status) { + ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); + if (PTR_ERR(ret) != -EAGAIN) + nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); + } else { + ret = repc ? 
rpc->params : NULL; + } + + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_alloc_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n", + client->object.handle, object->parent->handle, object->handle, oclass, argc); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hParent = object->parent->handle; + rpc->hObject = object->handle; + rpc->hClass = oclass; + rpc->status = 0; + rpc->paramsSize = argc; + return rpc->params; +} + +static void +r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) +{ + rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); + + if (!repv) + return; + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static int +r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc) +{ + rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params); + struct nvkm_gsp *gsp = object->client->gsp; + int ret = 0; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); + if (IS_ERR_OR_NULL(rpc)) { + *argv = NULL; + return PTR_ERR(rpc); + } + + if (rpc->status) { + ret = r535_rpc_status_to_errno(rpc->status); + if (ret != -EAGAIN) + nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", + object->client->object.handle, object->handle, rpc->cmd, rpc->status); + } + + if (repc) + *argv = rpc->params; + else + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_control_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n", + client->object.handle, object->handle, cmd, argc); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hObject = object->handle; + rpc->cmd = cmd; + rpc->status = 0; + rpc->paramsSize = argc; + return rpc->params; +} + +static void +r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) +{ + struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); + + r535_gsp_msg_done(gsp, rpc); +} + +static void * +r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) +{ + struct nvfw_gsp_rpc *rpc; + + rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); + if (IS_ERR(rpc)) + return ERR_CAST(rpc); + + rpc->header_version = 0x03000000; + rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; + rpc->function = fn; + rpc->rpc_result = 0xffffffff; + rpc->rpc_result_private = 0xffffffff; + rpc->length = sizeof(*rpc) + argc; + return rpc->data; +} + +static void * +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +{ + struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); + struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data); + const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg); + const u32 max_rpc_size = max_msg_size - sizeof(*rpc); + u32 rpc_size = rpc->length - sizeof(*rpc); + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + if (rpc_size > max_rpc_size) { + const u32 
fn = rpc->function; + + /* Adjust length, and send initial RPC. */ + rpc->length = sizeof(*rpc) + max_rpc_size; + cmd->checksum = rpc->length; + + repv = r535_gsp_rpc_send(gsp, argv, false, 0); + if (IS_ERR(repv)) + goto done; + + argv += max_rpc_size; + rpc_size -= max_rpc_size; + + /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ + while (rpc_size) { + u32 size = min(rpc_size, max_rpc_size); + void *next; + + next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); + if (IS_ERR(next)) { + repv = next; + goto done; + } + + memcpy(next, argv, size); + + repv = r535_gsp_rpc_send(gsp, next, false, 0); + if (IS_ERR(repv)) + goto done; + + argv += size; + rpc_size -= size; + } + + /* Wait for reply. */ + if (wait) { + rpc = r535_gsp_msg_recv(gsp, fn, repc); + if (!IS_ERR_OR_NULL(rpc)) + repv = rpc->data; + else + repv = rpc; + } else { + repv = NULL; + } + } else { + repv = r535_gsp_rpc_send(gsp, argv, wait, repc); + } + +done: + mutex_unlock(&gsp->cmdq.mutex); + return repv; +} + +const struct nvkm_gsp_rm +r535_gsp_rm = { + .rpc_get = r535_gsp_rpc_get, + .rpc_push = r535_gsp_rpc_push, + .rpc_done = r535_gsp_rpc_done, + + .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, + .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, + .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, + + .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, + .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, + .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, + + .rm_free = r535_gsp_rpc_rm_free, + + .client_ctor = r535_gsp_client_ctor, + .client_dtor = r535_gsp_client_dtor, + + .device_ctor = r535_gsp_device_ctor, + .device_dtor = r535_gsp_device_dtor, + + .event_ctor = r535_gsp_device_event_ctor, + .event_dtor = r535_gsp_event_dtor, +}; + +static void +r535_gsp_msgq_work(struct work_struct *work) +{ + struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); + + mutex_lock(&gsp->cmdq.mutex); + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + r535_gsp_msg_recv(gsp, 0, 0); + mutex_unlock(&gsp->cmdq.mutex); +} + +static irqreturn_t +r535_gsp_intr(struct nvkm_inth *inth) +{ + struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); + struct nvkm_subdev *subdev = &gsp->subdev; + u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); + u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + + gsp->falcon.func->riscv_irqmask); + u32 stat = intr & inte; + + if (!stat) { + nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); + return IRQ_NONE; + } + + if (stat & 0x00000040) { + nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); + schedule_work(&gsp->msgq.work); + stat &= ~0x00000040; + } + + if (stat) { + nvkm_error(subdev, "intr %08x\n", stat); + nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); + nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); + } + + nvkm_falcon_intr_retrigger(&gsp->falcon); + return IRQ_HANDLED; +} + +static int +r535_gsp_intr_get_table(struct nvkm_gsp *gsp) +{ + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; + int ret = 0; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (WARN_ON(ret)) { + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; + } + + for (unsigned i = 0; i < ctrl->tableLen; i++) { + enum nvkm_subdev_type type; + int inst; + + nvkm_debug(&gsp->subdev, + "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, + 
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, + ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); + + switch (ctrl->table[i].engineIdx) { + case MC_ENGINE_IDX_GSP: + type = NVKM_SUBDEV_GSP; + inst = 0; + break; + case MC_ENGINE_IDX_DISP: + type = NVKM_ENGINE_DISP; + inst = 0; + break; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: + type = NVKM_ENGINE_CE; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; + break; + case MC_ENGINE_IDX_GR0: + type = NVKM_ENGINE_GR; + inst = 0; + break; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + type = NVKM_ENGINE_NVDEC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; + break; + case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: + type = NVKM_ENGINE_NVENC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; + break; + case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: + type = NVKM_ENGINE_NVJPG; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; + break; + case MC_ENGINE_IDX_OFA0: + type = NVKM_ENGINE_OFA; + inst = 0; + break; + default: + continue; + } + + if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { + ret = -ENOSPC; + break; + } + + gsp->intr[gsp->intr_nr].type = type; + gsp->intr[gsp->intr_nr].inst = inst; + gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; + gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; + gsp->intr_nr++; + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; +} + +static int +r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + int last_usable = -1; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { + NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = + &rpc->fbRegionInfoParams.fbRegion[i]; + + nvkm_debug(&gsp->subdev, "fb region %d: " + "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, + reg->base, reg->limit, reg->reserved, reg->performance, + reg->supportCompressed, reg->supportISO, reg->bProtected); + + if (!reg->reserved && !reg->bProtected) { + if (reg->supportCompressed && reg->supportISO && + !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { + const u64 size = (reg->limit + 1) - reg->base; + + gsp->fb.region[gsp->fb.region_nr].addr = reg->base; + gsp->fb.region[gsp->fb.region_nr].size = size; + gsp->fb.region_nr++; + } + + last_usable = i; + } + } + + if (last_usable >= 0) { + u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; + + gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; + } + + for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { + if (rpc->gpcInfo.gpcMask & BIT(gpc)) { + gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); + gsp->gr.gpcs++; + } + } + + 
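/* + * Worked example (illustrative numbers, not from real hardware): a + * gpcMask of 0x3 with a tpcMask of 0xf in each of those GPCs counts + * gsp->gr.gpcs = 2 and gsp->gr.tpcs = 8. + */ +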
nvkm_gsp_rpc_done(gsp, rpc); + return 0; +} + +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. + */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); + memset(mem, 0, sizeof(*mem)); + } +} + +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->size = size; + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + return 0; +} + +static int +r535_gsp_postinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + int ret; + + ret = r535_gsp_rpc_get_gsp_static_info(gsp); + if (WARN_ON(ret)) + return ret; + + INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); + + ret = r535_gsp_intr_get_table(gsp); + if (WARN_ON(ret)) + return ret; + + ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); + if (WARN_ON(ret < 0)) + return ret; + + ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, + r535_gsp_intr, &gsp->subdev.inth); + if (WARN_ON(ret)) + return ret; + + nvkm_inth_allow(&gsp->subdev.inth); + nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); + nvkm_gsp_mem_dtor(gsp, &gsp->libos); + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); + + return ret; +} + +static int +r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) +{ + rpc_unloading_guest_driver_v1F_07 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + if (suspend) { + rpc->bInPMTransition = 1; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + } else { + rpc->bInPMTransition = 0; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; + } + + return nvkm_gsp_rpc_wr(gsp, rpc, true); +} + +/* dword only */ +struct nv_gsp_registry_entries { + const char *name; + u32 value; +}; + +static const struct nv_gsp_registry_entries r535_registry_entries[] = { + { "RMSecBusResetEnable", 1 }, + { "RMForcePcieConfigSave", 1 }, +}; +#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) + +static int +r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) +{ + PACKED_REGISTRY_TABLE *rpc; + char *strings; + int str_offset; + int i; + size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES); + + /* add strings + null terminator */ + for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) + rpc_size += strlen(r535_registry_entries[i].name) + 1; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; + + str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); + strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; + for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { + int name_len = strlen(r535_registry_entries[i].name) + 1; + + rpc->entries[i].nameOffset = str_offset; + rpc->entries[i].type = 1; + rpc->entries[i].data = r535_registry_entries[i].value; + rpc->entries[i].length = 4; + memcpy(strings, r535_registry_entries[i].name, name_len); + strings += name_len; + str_offset += name_len; + } + rpc->size = 
str_offset; + + return nvkm_gsp_rpc_wr(gsp, rpc, false); +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) +static void +r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) +{ + const guid_t NVOP_DSM_GUID = + GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); + u64 NVOP_DSM_REV = 0x00000100; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 4, + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + caps->status = 0xffff; + + if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) + return; + + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + caps->status = 0; + caps->optimusCaps = *(u32 *)obj->buffer.pointer; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static void +r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) +{ + const guid_t JT_DSM_GUID = + GUID_INIT(0xCBECA351L, 0x067B, 0x4924, + 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); + u64 JT_DSM_REV = 0x00000103; + u32 caps; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(caps), + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + jt->status = 0xffff; + + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + jt->status = 0; + jt->jtCaps = *(u32 *)obj->buffer.pointer; + jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; + jt->bSBIOSCaps = 0; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static void +r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, + MUX_METHOD_DATA_ELEMENT *part) +{ + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; + acpi_handle iter = NULL, handle_mux = NULL; + acpi_status status; + unsigned long long value; + + mode->status = 0xffff; + part->status = 0xffff; + + do { + status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); + if (ACPI_FAILURE(status) || !iter) + return; + + status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); + if (ACPI_FAILURE(status) || value != id) + continue; + + handle_mux = iter; + } while (!handle_mux); + + if (!handle_mux) + return; + + /* I -think- 0 means "acquire" according to nvidia's driver source */ + input.pointer->integer.type = ACPI_TYPE_INTEGER; + input.pointer->integer.value = 0; + + status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); + if (ACPI_SUCCESS(status)) { + mode->acpiId = id; + mode->mode = value; + mode->status = 0; + } + + status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); + if (ACPI_SUCCESS(status)) { + part->acpiId = id; + part->mode = value; + part->status = 0; + } +} + +static void +r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) +{ + mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); + + for (int i = 0; i < mux->tableLen; i++) { + r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], + &mux->acpiIdMuxPartTable[i]); + } +} + +static void +r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *_DOD; + + dod->status = 0xffff; + + status = 
acpi_evaluate_object(handle, "_DOD", NULL, &output); + if (ACPI_FAILURE(status)) + return; + + _DOD = output.pointer; + + if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || + WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) + return; + + for (int i = 0; i < _DOD->package.count; i++) { + if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) + return; + + dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; + dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); + } + + dod->status = 0; + kfree(output.pointer); +} +#endif + +static void +r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) +{ +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); + + if (!handle) + return; + + acpi->bValid = 1; + + r535_gsp_acpi_dod(handle, &acpi->dodMethodData); + if (acpi->dodMethodData.status == 0) + r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); + + r535_gsp_acpi_jt(handle, &acpi->jtMethodData); + r535_gsp_acpi_caps(handle, &acpi->capsMethodData); +#endif +} + +static int +r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); + GspSystemInfo *info; + + if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) + return -ENOSYS; + + info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->gpuPhysAddr = device->func->resource_addr(device, 0); + info->gpuPhysFbAddr = device->func->resource_addr(device, 1); + info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); + info->maxUserVa = TASK_SIZE; + info->pciConfigMirrorBase = 0x088000; + info->pciConfigMirrorSize = 0x001000; + r535_gsp_acpi_info(gsp, &info->acpiMethodData); + + return nvkm_gsp_rpc_wr(gsp, info, false); +} + +static int +r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_os_error_log_v17_00 *msg = repv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); + return 0; +} + +static int +r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_chan *chan; + unsigned long flags; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", + msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, + msg->partitionAttributionId); + + chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); + if (!chan) { + nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); + return 0; + } + + nvkm_chan_error(chan, false); + nvkm_chan_put(&chan, flags); + return 0; +} + +static int +r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + + WARN_ON(repc != 0); + + nvkm_error(subdev, "mmu fault queued\n"); + return 0; +} + +static int +r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_gsp_client *client; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_post_event_v17_00 *msg = repv; + + if (WARN_ON(repc < 
sizeof(*msg))) + return -EINVAL; + if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) + return -EINVAL; + + nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", + msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, + msg->status, msg->eventDataSize, msg->bNotifyList); + + mutex_lock(&gsp->client_id.mutex); + client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); + if (client) { + struct nvkm_gsp_event *event; + bool handled = false; + + list_for_each_entry(event, &client->events, head) { + if (event->object.handle == msg->hEvent) { + event->func(event, msg->eventData, msg->eventDataSize); + handled = true; + } + } + + if (!handled) { + nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", + msg->hClient, msg->hEvent); + } + } else { + nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); + } + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +/** + * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP + * + * The GSP sequencer is a list of I/O commands that the GSP can send to + * the driver to perform for various purposes. The most common usage is to + * perform a special mid-initialization reset. + */ +static int +r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + rpc_run_cpu_sequencer_v17_00 *seq = repv; + int ptr = 0, ret; + + nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); + + while (ptr < seq->cmdIndex) { + GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; + + ptr += 1; + ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); + + switch (cmd->opCode) { + case GSP_SEQ_BUF_OPCODE_REG_WRITE: { + u32 addr = cmd->payload.regWrite.addr; + u32 data = cmd->payload.regWrite.val; + + nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); + nvkm_wr32(device, addr, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { + u32 addr = cmd->payload.regModify.addr; + u32 mask = cmd->payload.regModify.mask; + u32 data = cmd->payload.regModify.val; + + nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); + nvkm_mask(device, addr, mask, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_POLL: { + u32 addr = cmd->payload.regPoll.addr; + u32 mask = cmd->payload.regPoll.mask; + u32 data = cmd->payload.regPoll.val; + u32 usec = cmd->payload.regPoll.timeout ?: 4000000; + //u32 error = cmd->payload.regPoll.error; + + nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); + nvkm_rd32(device, addr); + nvkm_usec(device, usec, + if ((nvkm_rd32(device, addr) & mask) == data) + break; + ); + } + break; + case GSP_SEQ_BUF_OPCODE_DELAY_US: { + u32 usec = cmd->payload.delayUs.val; + + nvkm_trace(subdev, "seq usec %d\n", usec); + udelay(usec); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_STORE: { + u32 addr = cmd->payload.regStore.addr; + u32 slot = cmd->payload.regStore.index; + + seq->regSaveArea[slot] = nvkm_rd32(device, addr); + nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, + seq->regSaveArea[slot]); + } + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESET: + nvkm_trace(subdev, "seq core reset\n"); + nvkm_falcon_reset(&gsp->falcon); + nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); + nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); + break; + case GSP_SEQ_BUF_OPCODE_CORE_START: + nvkm_trace(subdev, "seq core start\n"); + if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) + 
nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); + else + nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); + break; + case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: + nvkm_trace(subdev, "seq core wait halt\n"); + nvkm_msec(device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) + break; + ); + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { + struct nvkm_sec2 *sec2 = device->sec2; + u32 mbox0; + + nvkm_trace(subdev, "seq core resume\n"); + + ret = gsp->func->reset(gsp); + if (WARN_ON(ret)) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + + nvkm_falcon_start(&sec2->falcon); + + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x1180f8) & 0x04000000) + break; + ) < 0) + return -ETIMEDOUT; + + mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); + if (WARN_ON(mbox0)) { + nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); + return -EIO; + } + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + } + break; + default: + nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode); + return -EINVAL; + } + } + + return 0; +} + +static int +r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + u32 wpr2_hi; + int ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (!wpr2_hi) { + nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); + return 0; + } + + ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (WARN_ON(ret)) + return ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (WARN_ON(wpr2_hi)) + return -EIO; + + return 0; +} + +static int +r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + int ret; + + ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (ret) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + + return 0; +} + +static int +r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); + if (ret) + return ret; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; + meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; + + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->gspFwRsvdStart = gsp->fb.heap.addr; + meta->nonWprHeapOffset = gsp->fb.heap.addr; + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwWprStart = gsp->fb.wpr2.addr; + meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; + meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; + meta->gspFwOffset = gsp->fb.wpr2.elf.addr; + meta->bootBinOffset = gsp->fb.wpr2.boot.addr; + meta->frtsOffset = gsp->fb.wpr2.frts.addr; + meta->frtsSize = gsp->fb.wpr2.frts.size; + meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); + meta->fbSize = gsp->fb.size; + 
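/* + * These offsets mirror the descending carve-out computed in + * r535_gsp_oneinit(): VGA workspace at the top of FB, with FRTS, boot, + * ELF image and heap laid out beneath it. + */ +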
meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->bootCount = 0; + meta->partitionRpcAddr = 0; + meta->partitionRpcRequestOffset = 0; + meta->partitionRpcReplyOffset = 0; + meta->verified = 0; + return 0; +} + +static int +r535_gsp_shared_init(struct nvkm_gsp *gsp) +{ + struct { + msgqTxHeader tx; + msgqRxHeader rx; + } *cmdq, *msgq; + int ret, i; + + gsp->shm.cmdq.size = 0x40000; + gsp->shm.msgq.size = 0x40000; + + gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; + gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + + ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + + gsp->shm.cmdq.size + + gsp->shm.msgq.size, + &gsp->shm.mem); + if (ret) + return ret; + + gsp->shm.ptes.ptr = gsp->shm.mem.data; + gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; + gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; + + for (i = 0; i < gsp->shm.ptes.nr; i++) + gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); + + cmdq = gsp->shm.cmdq.ptr; + cmdq->tx.version = 0; + cmdq->tx.size = gsp->shm.cmdq.size; + cmdq->tx.entryOff = GSP_PAGE_SIZE; + cmdq->tx.msgSize = GSP_PAGE_SIZE; + cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; + cmdq->tx.writePtr = 0; + cmdq->tx.flags = 1; + cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); + + msgq = gsp->shm.msgq.ptr; + + gsp->cmdq.cnt = cmdq->tx.msgCount; + gsp->cmdq.wptr = &cmdq->tx.writePtr; + gsp->cmdq.rptr = &msgq->rx.readPtr; + gsp->msgq.cnt = cmdq->tx.msgCount; + gsp->msgq.wptr = &msgq->tx.writePtr; + gsp->msgq.rptr = &cmdq->rx.readPtr; + return 0; +} + +static int +r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +{ + GSP_ARGUMENTS_CACHED *args; + int ret; + + if (!resume) { + ret = r535_gsp_shared_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); + if (ret) + return ret; + } + + args = gsp->rmargs.data; + args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; + args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; + args->messageQueueInitArguments.cmdQueueOffset = + (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; + args->messageQueueInitArguments.statQueueOffset = + (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; + + if (!resume) { + args->srInitArguments.oldLevel = 0; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 0; + } else { + args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 1; + } + + return 0; +} + +static inline u64 +r535_gsp_libos_id8(const char *name) +{ + u64 id = 0; + + for (int i = 0; i < sizeof(id) && *name; i++, name++) + id = (id << 8) | *name; + + return id; +} + +/** + * create_pte_array() - creates a PTE array of a physically contiguous buffer + * @ptes: pointer to the array + * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned) + * @size: size of the buffer + * + * GSP-RM sometimes expects physically-contiguous buffers to have an array of + * "PTEs" for each page in that buffer. Although in theory that allows for + * the buffer to be physically discontiguous, GSP-RM does not currently + * support that. + * + * In this case, the PTEs are DMA addresses of each page of the buffer. 
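For + * example (assuming 4KiB GSP pages), a 64KiB buffer at bus address A gets + * sixteen PTEs: A, A + 0x1000, ..., A + 0xf000.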
Since + * the buffer is physically contiguous, calculating all the PTEs is simple + * math. + * + * See memdescGetPhysAddrsForGpu() + */ +static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) +{ + unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); + unsigned int i; + + for (i = 0; i < num_pages; i++) + ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); +} + +/** + * r535_gsp_libos_init() -- create the libos arguments structure + * + * The logging buffers are byte queues that contain encoded printf-like + * messages from GSP-RM. They need to be decoded by a special application + * that can parse the buffers. + * + * The 'loginit' buffer contains logs from early GSP-RM init and + * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are + * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. + * + * The physical address map for the log buffer is stored in the buffer + * itself, starting with offset 1. Offset 0 contains the "put" pointer. + * + * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is + * configured for a larger page size (e.g. 64K pages), we need to give + * the GSP an array of 4K pages. Fortunately, since the buffer is + * physically contiguous, it's simple math to calculate the addresses. + * + * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently + * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the + * buffers to be physically contiguous anyway. + * + * The memory allocated for the arguments must remain until the GSP sends the + * init_done RPC. + * + * See _kgspInitLibosLoggingStructures (allocates memory for buffers) + * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) + */ +static int +r535_gsp_libos_init(struct nvkm_gsp *gsp) +{ + LibosMemoryRegionInitArgument *args; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); + if (ret) + return ret; + + args = gsp->libos.data; + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); + if (ret) + return ret; + + args[0].id8 = r535_gsp_libos_id8("LOGINIT"); + args[0].pa = gsp->loginit.addr; + args[0].size = gsp->loginit.size; + args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); + if (ret) + return ret; + + args[1].id8 = r535_gsp_libos_id8("LOGINTR"); + args[1].pa = gsp->logintr.addr; + args[1].size = gsp->logintr.size; + args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); + if (ret) + return ret; + + args[2].id8 = r535_gsp_libos_id8("LOGRM"); + args[2].pa = gsp->logrm.addr; + args[2].size = gsp->logrm.size; + args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); + + ret = r535_gsp_rmargs_init(gsp, false); + if (ret) + return ret; + + args[3].id8 = r535_gsp_libos_id8("RMARGS"); + args[3].pa = gsp->rmargs.addr; + args[3].size = gsp->rmargs.size; + args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + return 0; +} + +void +nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) +{ + struct scatterlist *sgl; + int i; + + 
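/* Undo nvkm_gsp_sg(): unmap the pages from the device, free each page, + * then release the table itself. */ +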
dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); + + for_each_sgtable_sg(sgt, sgl, i) { + struct page *page = sg_page(sgl); + + __free_page(page); + } + + sg_free_table(sgt); +} + +int +nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) +{ + const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); + struct scatterlist *sgl; + int ret, i; + + ret = sg_alloc_table(sgt, pages, GFP_KERNEL); + if (ret) + return ret; + + for_each_sgtable_sg(sgt, sgl, i) { + struct page *page = alloc_page(GFP_KERNEL); + + if (!page) { + nvkm_gsp_sg_free(device, sgt); + return -ENOMEM; + } + + sg_set_page(sgl, page, PAGE_SIZE, 0); + } + + ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) + nvkm_gsp_sg_free(device, sgt); + + return ret; +} + +static void +nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) +{ + for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) + nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]); +} + +/** + * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list + * + * The GSP uses a three-level page table, called radix3, to map the firmware. + * Each 64-bit "pointer" in the table is either the bus address of an entry in + * the next table (for levels 0 and 1) or the bus address of the next page in + * the GSP firmware image itself. + * + * Level 0 contains a single entry in one page that points to the first page + * of level 1. + * + * Level 1, since it's also only one page in size, contains up to 512 entries, + * one for each page in Level 2. + * + * Level 2 can be up to 512 pages in size, and each of those entries points to + * the next page of the firmware image. Since there can be up to 512*512 + * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. + * + * Internally, the GSP has its window into system memory, but the base + * physical address of the aperture is not 0. In fact, it varies depending on + * the GPU architecture. Since the GPU is a PCI device, this window is + * accessed via DMA and is therefore bound by IOMMU translation. The end + * result is that GSP-RM must translate the bus addresses in the table to GSP + * physical addresses. All this should happen transparently. 
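As an + * illustrative example, a 33MiB image spans 8448 4KiB pages: level 2 then + * needs 8448 entries (17 pages of PTEs), level 1 holds those 17 page + * pointers, and level 0 a single entry.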
+ * + * Returns 0 on success, or negative error code + * + * See kgspCreateRadix3_IMPL + */ +static int +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, + struct nvkm_gsp_radix3 *rx3) +{ + u64 addr; + + for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { + u64 *ptes; + size_t bufsize; + int ret, idx; + + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); + if (ret) + return ret; + + ptes = rx3->mem[i].data; + if (i == 2) { + struct scatterlist *sgl; + + for_each_sgtable_dma_sg(sgt, sgl, idx) { + for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++) + *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j); + } + } else { + for (int j = 0; j < size / GSP_PAGE_SIZE; j++) + *ptes++ = addr + GSP_PAGE_SIZE * j; + } + + size = rx3->mem[i].size; + addr = rx3->mem[i].addr; + } + + return 0; +} + +int +r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + u32 mbox0 = 0xff, mbox1 = 0xff; + int ret; + + if (!gsp->running) + return 0; + + if (suspend) { + GspFwWprMeta *meta = gsp->wpr_meta.data; + u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; + GspFwSRMeta *sr; + + ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); + if (ret) + return ret; + + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); + if (ret) + return ret; + + sr = gsp->sr.meta.data; + sr->magic = GSP_FW_SR_META_MAGIC; + sr->revision = GSP_FW_SR_META_REVISION; + sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr; + sr->sizeOfSuspendResumeData = len; + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); + if (WARN_ON(ret)) + return ret; + + nvkm_msec(gsp->subdev.device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) + break; + ); + + nvkm_falcon_reset(&gsp->falcon); + + ret = nvkm_gsp_fwsec_sb(gsp); + WARN_ON(ret); + + ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); + WARN_ON(ret); + + gsp->running = false; + return 0; +} + +int +r535_gsp_init(struct nvkm_gsp *gsp) +{ + u32 mbox0, mbox1; + int ret; + + if (!gsp->sr.meta.data) { + mbox0 = lower_32_bits(gsp->wpr_meta.addr); + mbox1 = upper_32_bits(gsp->wpr_meta.addr); + } else { + r535_gsp_rmargs_init(gsp, true); + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + /* Execute booter to handle (eventually...) booting GSP-RM. 
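On a cold boot, mbox0/1 carry the lower/upper halves of the wpr_meta address; on resume they point at the sr.meta descriptor built during suspend instead.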
*/ + ret = r535_gsp_booter_load(gsp, mbox0, mbox1); + if (WARN_ON(ret)) + goto done; + + ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); + if (ret) + goto done; + + gsp->running = true; + +done: + if (gsp->sr.meta.data) { + nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta); + nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); + return ret; + } + + if (ret == 0) + ret = r535_gsp_postinit(gsp); + + return ret; +} + +static int +r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) +{ + const struct firmware *fw = gsp->fws.bl; + const struct nvfw_bin_hdr *hdr; + RM_RISCV_UCODE_DESC *desc; + int ret; + + hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); + desc = (void *)fw->data + hdr->header_offset; + + ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); + if (ret) + return ret; + + memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); + + gsp->boot.code_offset = desc->monitorCodeOffset; + gsp->boot.data_offset = desc->monitorDataOffset; + gsp->boot.manifest_offset = desc->manifestOffset; + gsp->boot.app_version = desc->appVersion; + return 0; +} + +static const struct nvkm_firmware_func +r535_gsp_fw = { + .type = NVKM_FIRMWARE_IMG_SGT, +}; + +static int +r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) +{ + const u8 *img = gsp->fws.rm->data; + const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; + const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; + const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; + + for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { + if (!strcmp(&names[shdr->sh_name], name)) { + *pdata = &img[shdr->sh_offset]; + *psize = shdr->sh_size; + return 0; + } + } + + nvkm_error(&gsp->subdev, "section '%s' not found\n", name); + return -ENOENT; +} + +static void +r535_gsp_dtor_fws(struct nvkm_gsp *gsp) +{ + nvkm_firmware_put(gsp->fws.bl); + gsp->fws.bl = NULL; + nvkm_firmware_put(gsp->fws.booter.unload); + gsp->fws.booter.unload = NULL; + nvkm_firmware_put(gsp->fws.booter.load); + gsp->fws.booter.load = NULL; + nvkm_firmware_put(gsp->fws.rm); + gsp->fws.rm = NULL; +} + +void +r535_gsp_dtor(struct nvkm_gsp *gsp) +{ + idr_destroy(&gsp->client_id.idr); + mutex_destroy(&gsp->client_id.mutex); + + nvkm_gsp_radix3_dtor(gsp, &gsp->radix3); + nvkm_gsp_mem_dtor(gsp, &gsp->sig); + nvkm_firmware_dtor(&gsp->fw); + + nvkm_falcon_fw_dtor(&gsp->booter.unload); + nvkm_falcon_fw_dtor(&gsp->booter.load); + + mutex_destroy(&gsp->msgq.mutex); + mutex_destroy(&gsp->cmdq.mutex); + + r535_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); +} + +int +r535_gsp_oneinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + const u8 *data; + u64 size; + int ret; + + mutex_init(&gsp->cmdq.mutex); + mutex_init(&gsp->msgq.mutex); + + ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, + &device->sec2->falcon, &gsp->booter.load); + if (ret) + return ret; + + ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, + &device->sec2->falcon, &gsp->booter.unload); + if (ret) + return ret; + + /* Load GSP firmware from ELF image into DMA-accessible memory. 
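The image is kept as an SGT-backed nvkm_firmware (NVKM_FIRMWARE_IMG_SGT) so the radix3 page tables built below can point straight at its pages.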
*/ + ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); + if (ret) + return ret; + + ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); + if (ret) + return ret; + + /* Load relevant signature from ELF image. */ + ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); + if (ret) + return ret; + + memcpy(gsp->sig.data, data, size); + + /* Build radix3 page table for ELF image. */ + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + if (ret) + return ret; + + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, + r535_gsp_msg_run_cpu_sequencer, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, + r535_gsp_msg_rc_triggered, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, + r535_gsp_msg_mmu_fault_queued, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); + ret = r535_gsp_rm_boot_ctor(gsp); + if (ret) + return ret; + + /* Release FW images - we've copied them to DMA buffers now. */ + r535_gsp_dtor_fws(gsp); + + /* Calculate FB layout. */ + gsp->fb.wpr2.frts.size = 0x100000; + gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; + + gsp->fb.wpr2.boot.size = gsp->boot.fw.size; + gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); + + gsp->fb.wpr2.elf.size = gsp->fw.len; + gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); + + { + u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); + + gsp->fb.wpr2.heap.size = + gsp->func->wpr_heap.os_carveout_size + + gsp->func->wpr_heap.base_size + + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + + ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); + + gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); + } + + gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); + gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); + + gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); + gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; + + gsp->fb.heap.size = 0x100000; + gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; + + ret = nvkm_gsp_fwsec_frts(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_libos_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_wpr_meta_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_system_info(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_registry(gsp); + if (WARN_ON(ret)) + return ret; + + /* Reset GSP into RISC-V mode. 
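Mailbox 0/1 are then loaded with the libos argument buffer's address, which is how GSP-RM locates its boot arguments and logging buffers.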
*/ + ret = gsp->func->reset(gsp); + if (WARN_ON(ret)) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + + mutex_init(&gsp->client_id.mutex); + idr_init(&gsp->client_id.idr); + return 0; +} + +static int +r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, + const struct firmware **pfw) +{ + char fwname[64]; + + snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); + return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); +} + +int +r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + int ret; + + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) + return -EINVAL; + + if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || + (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) || + (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) || + (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) { + r535_gsp_dtor_fws(gsp); + return ret; + } + + return 0; +} + +#define NVKM_GSP_FIRMWARE(chip) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin") + +NVKM_GSP_FIRMWARE(tu102); +NVKM_GSP_FIRMWARE(tu104); +NVKM_GSP_FIRMWARE(tu106); + +NVKM_GSP_FIRMWARE(tu116); +NVKM_GSP_FIRMWARE(tu117); + +NVKM_GSP_FIRMWARE(ga100); + +NVKM_GSP_FIRMWARE(ga102); +NVKM_GSP_FIRMWARE(ga103); +NVKM_GSP_FIRMWARE(ga104); +NVKM_GSP_FIRMWARE(ga106); +NVKM_GSP_FIRMWARE(ga107); + +NVKM_GSP_FIRMWARE(ad102); +NVKM_GSP_FIRMWARE(ad103); +NVKM_GSP_FIRMWARE(ad104); +NVKM_GSP_FIRMWARE(ad106); +NVKM_GSP_FIRMWARE(ad107); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c new file mode 100644 index 0000000000..59c5f2b917 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -0,0 +1,198 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
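Once the falcon is back in RISC-V mode, the 64-bit libos argument address is handed to GSP through the two 32-bit mailbox registers written above (0x040/0x044). The split is the usual lower/upper halves; a sketch with a logging stand-in for nvkm_falcon_wr32():

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for nvkm_falcon_wr32(); just logs the access */
	static void wr32(uint32_t reg, uint32_t data)
	{
		printf("falcon[%#05x] <- %#010x\n", reg, data);
	}

	static void mbox_write_addr(uint64_t addr)
	{
		wr32(0x040, (uint32_t)addr);         /* lower_32_bits() */
		wr32(0x044, (uint32_t)(addr >> 32)); /* upper_32_bits() */
	}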
+ */
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+		      struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+	struct nvkm_subdev *subdev = &gsp->subdev;
+	const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_hs_header_v2 *hshdr;
+	const struct nvfw_hs_load_header_v2 *lhdr;
+	u32 loc, sig, cnt;
+	int ret;
+
+	hdr = nvfw_bin_hdr(subdev, blob->data);
+	hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+	loc = *(u32 *)(blob->data + hshdr->patch_loc);
+	sig = *(u32 *)(blob->data + hshdr->patch_sig);
+	cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+	ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+				  blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+	if (ret)
+		goto done;
+
+	ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+				  cnt, hshdr->sig_prod_offset + sig, 0, 0);
+	if (ret)
+		goto done;
+
+	lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+	fw->nmem_base_img = 0;
+	fw->nmem_base = lhdr->os_code_offset;
+	fw->nmem_size = lhdr->os_code_size;
+	fw->imem_base_img = fw->nmem_size;
+	fw->imem_base = lhdr->app[0].offset;
+	fw->imem_size = lhdr->app[0].size;
+	fw->dmem_base_img = lhdr->os_data_offset;
+	fw->dmem_base = 0;
+	fw->dmem_size = lhdr->os_data_size;
+	fw->dmem_sign = loc - fw->dmem_base_img;
+	fw->boot_addr = lhdr->os_code_offset;
+
+done:
+	if (ret)
+		nvkm_falcon_fw_dtor(fw);
+
+	return ret;
+}
+
+static int
+tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw)
+{
+	struct flcn_bl_dmem_desc_v2 desc = {
+		.ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH,
+		.code_dma_base = fw->fw.phys,
+		.non_sec_code_off = fw->nmem_base,
+		.non_sec_code_size = fw->nmem_size,
+		.sec_code_off = fw->imem_base,
+		.sec_code_size = fw->imem_size,
+		.code_entry_point = 0,
+		.data_dma_base = fw->fw.phys + fw->dmem_base_img,
+		.data_size = fw->dmem_size,
+		.argc = 0,
+		.argv = 0,
+	};
+
+	flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
+
+	nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005);
+
+	return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0);
+}
+
+const struct nvkm_falcon_fw_func
+tu102_gsp_fwsec = {
+	.reset = gm200_flcn_fw_reset,
+	.load = gm200_flcn_fw_load,
+	.load_bld = tu102_gsp_fwsec_load_bld,
+	.boot = gm200_flcn_fw_boot,
+};
+
+int
+tu102_gsp_reset(struct nvkm_gsp *gsp)
+{
+	return gsp->falcon.func->reset_eng(&gsp->falcon);
+}
+
+static u64
+tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
+{
+	struct nvkm_device *device = gsp->subdev.device;
+	const u64 base = fb_size - 0x100000;
+	u64 addr = 0;
+
+	if (device->disp)
+		addr = nvkm_rd32(gsp->subdev.device, 0x625f04);
+	if (!(addr & 0x00000008))
+		return base;
+
+	addr = (addr & 0xffffff00) << 8;
+	if (addr < base)
+		return fb_size - 0x20000;
+
+	return addr;
+}
+
+int
+tu102_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+	gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+
+	gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
+	gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
+	gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
+	gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
+
+	return r535_gsp_oneinit(gsp);
+}
+
+const struct nvkm_falcon_func
+tu102_gsp_flcn = {
+	.disable = gm200_flcn_disable,
+	.enable = gm200_flcn_enable,
+	.addr2 = 0x1000,
.riscv_irqmask = 0x2b4, + .reset_eng = gp102_flcn_reset_eng, + .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing, + .bind_inst = gm200_flcn_bind_inst, + .bind_stat = gm200_flcn_bind_stat, + .bind_intr = true, + .imem_pio = &gm200_flcn_imem_pio, + .dmem_pio = &gm200_flcn_dmem_pio, + .riscv_active = tu102_flcn_riscv_active, +}; + +static const struct nvkm_gsp_func +tu102_gsp_r535_113_01 = { + .flcn = &tu102_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_tu10x", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +tu102_gsps[] = { + { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c new file mode 100644 index 0000000000..04fbd9ed28 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
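The tu102_gsps[]/tu116_gsps[] tables above pair a firmware revision string with a per-chip function table; the { -1, gv100_gsp_nofw, ... } entry is the always-eligible no-firmware fallback, and the empty initializer terminates the list. A sketch of how such a table can be walked (toy types; nvkm's real selection goes through its firmware loader, not this helper):

	#include <stddef.h>

	struct fwif {
		int version;      /* -1 marks the always-matching fallback */
		int (*load)(void *ctx, int ver, const struct fwif *);
		const void *func; /* per-entry implementation table */
	};

	/* first entry whose load() succeeds wins */
	static const struct fwif *
	fwif_probe(const struct fwif *fwif, void *ctx)
	{
		for (; fwif->load; fwif++) {
			if (fwif->load(ctx, fwif->version, fwif) == 0)
				return fwif;
		}
		return NULL;
	}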
+ */ +#include "priv.h" + +static const struct nvkm_gsp_func +tu116_gsp_r535_113_01 = { + .flcn = &tu102_gsp_flcn, + .fwsec = &tu102_gsp_fwsec, + + .sig_section = ".fwsignature_tu11x", + + .wpr_heap.base_size = 8 << 20, + .wpr_heap.min_size = 64 << 20, + + .booter.ctor = tu102_gsp_booter_ctor, + + .dtor = r535_gsp_dtor, + .oneinit = tu102_gsp_oneinit, + .init = r535_gsp_init, + .fini = r535_gsp_fini, + .reset = tu102_gsp_reset, + + .rm = &r535_gsp_rm, +}; + +static struct nvkm_gsp_fwif +tu116_gsps[] = { + { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, + { -1, gv100_gsp_nofw, &gv100_gsp }, + {} +}; + +int +tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c index 46917eb600..0494775113 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c @@ -24,6 +24,8 @@ #include "priv.h" #include "pad.h" +#include + static void gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable) { @@ -44,5 +46,8 @@ int gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild index 06cbe19ce3..553d540f27 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/instmem/nv04.o nvkm-y += nvkm/subdev/instmem/nv40.o nvkm-y += nvkm/subdev/instmem/nv50.o nvkm-y += nvkm/subdev/instmem/gk20a.o + +nvkm-y += nvkm/subdev/instmem/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index e0e4f97be0..a2cd3330ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -28,7 +28,7 @@ /****************************************************************************** * instmem object base implementation *****************************************************************************/ -static void +void nvkm_instobj_load(struct nvkm_instobj *iobj) { struct nvkm_memory *memory = &iobj->memory; @@ -48,7 +48,7 @@ nvkm_instobj_load(struct nvkm_instobj *iobj) iobj->suspend = NULL; } -static int +int nvkm_instobj_save(struct nvkm_instobj *iobj) { struct nvkm_memory *memory = &iobj->memory; @@ -94,15 +94,21 @@ nvkm_instobj_wrap(struct nvkm_device *device, struct nvkm_memory *memory, struct nvkm_memory **pmemory) { struct nvkm_instmem *imem = device->imem; + int ret; if (!imem->func->memory_wrap) return -ENOSYS; - return imem->func->memory_wrap(imem, memory, pmemory); + ret = imem->func->memory_wrap(imem, memory, pmemory); + if (ret) + return ret; + + container_of(*pmemory, struct nvkm_instobj, memory)->preserve = true; + return 0; } int -nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, +nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, bool preserve, struct nvkm_memory **pmemory) { struct nvkm_subdev *subdev = &imem->subdev; @@ -130,6 +136,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, nvkm_done(memory); } + container_of(memory, struct 
nvkm_instobj, memory)->preserve = preserve; done: if (ret) nvkm_memory_unref(&memory); @@ -172,22 +179,14 @@ static int nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_instmem *imem = nvkm_instmem(subdev); - struct nvkm_instobj *iobj; + int ret; if (suspend) { - list_for_each_entry(iobj, &imem->list, head) { - int ret = nvkm_instobj_save(iobj); - if (ret) - return ret; - } - - nvkm_bar_bar2_fini(subdev->device); + ret = imem->func->suspend(imem); + if (ret) + return ret; - list_for_each_entry(iobj, &imem->boot, head) { - int ret = nvkm_instobj_save(iobj); - if (ret) - return ret; - } + imem->suspend = true; } if (imem->func->fini) @@ -200,20 +199,16 @@ static int nvkm_instmem_init(struct nvkm_subdev *subdev) { struct nvkm_instmem *imem = nvkm_instmem(subdev); - struct nvkm_instobj *iobj; - list_for_each_entry(iobj, &imem->boot, head) { - if (iobj->suspend) - nvkm_instobj_load(iobj); - } + if (imem->suspend) { + if (imem->func->resume) + imem->func->resume(imem); - nvkm_bar_bar2_init(subdev->device); - - list_for_each_entry(iobj, &imem->list, head) { - if (iobj->suspend) - nvkm_instobj_load(iobj); + imem->suspend = false; + return 0; } + nvkm_bar_bar2_init(subdev->device); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index a4ac94a2ab..201022ae92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -49,14 +49,14 @@ #include struct gk20a_instobj { - struct nvkm_memory memory; + struct nvkm_instobj base; struct nvkm_mm_node *mn; struct gk20a_instmem *imem; /* CPU mapping */ u32 *vaddr; }; -#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) +#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory) /* * Used for objects allocated using the DMA API @@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj) list_del(&obj->vaddr_node); vunmap(obj->base.vaddr); obj->base.vaddr = NULL; - imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); + imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory); nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use, imem->vaddr_max); } @@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, { struct gk20a_instobj *node = gk20a_instobj(memory); struct nvkm_vmm_map map = { - .memory = &node->memory, + .memory = &node->base.memory, .offset = offset, .mem = node->mn, }; @@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, return -ENOMEM; *_node = &node->base; - nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); - node->base.memory.ptrs = &gk20a_instobj_ptrs; + nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory); + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, @@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, *_node = &node->base; node->dma_addrs = (void *)(node->pages + npages); - nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); - node->base.memory.ptrs = &gk20a_instobj_ptrs; + nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory); + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; /* Allocate backing memory */ for (i = 0; i < npages; i++) { @@ -533,7 +533,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, 
u32 align, bool zero, else ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT, align, &node); - *pmemory = node ? &node->memory : NULL; + *pmemory = node ? &node->base.memory : NULL; if (ret) return ret; @@ -564,6 +564,8 @@ gk20a_instmem_dtor(struct nvkm_instmem *base) static const struct nvkm_instmem_func gk20a_instmem = { .dtor = gk20a_instmem_dtor, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .memory_new = gk20a_instobj_new, .zero = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c index 25603b01d6..e5320ef849 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -25,6 +25,7 @@ #include "priv.h" #include +#include struct nv04_instmem { struct nvkm_instmem base; @@ -154,6 +155,48 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data) nvkm_wr32(imem->subdev.device, 0x700000 + addr, data); } +void +nv04_instmem_resume(struct nvkm_instmem *imem) +{ + struct nvkm_instobj *iobj; + + list_for_each_entry(iobj, &imem->boot, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + nvkm_bar_bar2_init(imem->subdev.device); + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } +} + +int +nv04_instmem_suspend(struct nvkm_instmem *imem) +{ + struct nvkm_instobj *iobj; + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + } + + nvkm_bar_bar2_fini(imem->subdev.device); + + list_for_each_entry(iobj, &imem->boot, head) { + int ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + + return 0; +} + static int nv04_instmem_oneinit(struct nvkm_instmem *base) { @@ -210,6 +253,8 @@ static const struct nvkm_instmem_func nv04_instmem = { .dtor = nv04_instmem_dtor, .oneinit = nv04_instmem_oneinit, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .rd32 = nv04_instmem_rd32, .wr32 = nv04_instmem_wr32, .memory_new = nv04_instobj_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 4b2d7465d2..a7f3fc342d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -27,6 +27,7 @@ #include #include #include +#include #include struct nv50_instmem { @@ -394,24 +395,44 @@ nv50_instmem_fini(struct nvkm_instmem *base) nv50_instmem(base)->addr = ~0ULL; } +static void * +nv50_instmem_dtor(struct nvkm_instmem *base) +{ + return nv50_instmem(base); +} + static const struct nvkm_instmem_func nv50_instmem = { + .dtor = nv50_instmem_dtor, .fini = nv50_instmem_fini, + .suspend = nv04_instmem_suspend, + .resume = nv04_instmem_resume, .memory_new = nv50_instobj_new, .memory_wrap = nv50_instobj_wrap, .zero = false, }; int -nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_instmem **pimem) +nv50_instmem_new_(const struct nvkm_instmem_func *func, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) { struct nv50_instmem *imem; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base); + nvkm_instmem_ctor(func, device, type, inst, &imem->base); INIT_LIST_HEAD(&imem->lru); *pimem = &imem->base; return 0; } + +int +nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, 
int inst, + struct nvkm_instmem **pimem) +{ + if (nvkm_gsp_rm(device->gsp)) + return r535_instmem_new(&nv50_instmem, device, type, inst, pimem); + + return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index fe92986a38..4c14c96fb6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -7,6 +7,8 @@ struct nvkm_instmem_func { void *(*dtor)(struct nvkm_instmem *); int (*oneinit)(struct nvkm_instmem *); + int (*suspend)(struct nvkm_instmem *); + void (*resume)(struct nvkm_instmem *); void (*fini)(struct nvkm_instmem *); u32 (*rd32)(struct nvkm_instmem *, u32 addr); void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data); @@ -16,19 +18,31 @@ struct nvkm_instmem_func { bool zero; }; +int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_instmem **); + void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem *); void nvkm_instmem_boot(struct nvkm_instmem *); +int nv04_instmem_suspend(struct nvkm_instmem *); +void nv04_instmem_resume(struct nvkm_instmem *); + +int r535_instmem_new(const struct nvkm_instmem_func *, + struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); + #include struct nvkm_instobj { struct nvkm_memory memory; struct list_head head; + bool preserve; u32 *suspend; }; void nvkm_instobj_ctor(const struct nvkm_memory_func *func, struct nvkm_instmem *, struct nvkm_instobj *); void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *); +int nvkm_instobj_save(struct nvkm_instobj *); +void nvkm_instobj_load(struct nvkm_instobj *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c new file mode 100644 index 0000000000..5f3c9c02a4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c @@ -0,0 +1,333 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
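The preserve flag and the u32 *suspend backing pointer added to struct nvkm_instobj above drive a simple save/restore protocol: suspend copies each preserved object's contents into a CPU-side buffer, resume writes it back and drops the buffer. A freestanding sketch of that protocol (toy types and a -1 error return; the kernel code goes through nvkm_ro32/nvkm_wo32 and returns -ENOMEM):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct obj {
		uint32_t *backing;   /* device-memory stand-in */
		size_t    dwords;
		int       preserve;
		uint32_t *suspend;   /* CPU copy while suspended */
	};

	static int obj_save(struct obj *o)
	{
		if (!o->preserve)
			return 0;
		o->suspend = malloc(o->dwords * sizeof(*o->suspend));
		if (!o->suspend)
			return -1;
		memcpy(o->suspend, o->backing, o->dwords * sizeof(*o->suspend));
		return 0;
	}

	static void obj_load(struct obj *o)
	{
		if (!o->suspend)
			return;
		memcpy(o->backing, o->suspend, o->dwords * sizeof(*o->suspend));
		free(o->suspend);
		o->suspend = NULL;
	}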
+ */ +#include "priv.h" + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +struct fbsr_item { + const char *type; + u64 addr; + u64 size; + + struct list_head head; +}; + +struct fbsr { + struct list_head items; + + u64 size; + int regions; + + struct nvkm_gsp_client client; + struct nvkm_gsp_device device; + + u64 hmemory; + u64 sys_offset; +}; + +static int +fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, + u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + const u32 pages = size / GSP_PAGE_SIZE; + rpc_alloc_memory_v13_01 *rpc; + int ret; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, + sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0])); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + rpc->hClient = client->object.handle; + rpc->hDevice = device->object.handle; + rpc->hMemory = handle; + if (aper == NVKM_MEM_TARGET_HOST) { + rpc->hClass = NV01_MEMORY_LIST_SYSTEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, PCI) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + } else { + rpc->hClass = NV01_MEMORY_LIST_FBMEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ + } + rpc->pteAdjust = 0; + rpc->length = size; + rpc->pageCount = pages; + rpc->pteDesc.idr = 0; + rpc->pteDesc.reserved1 = 0; + rpc->pteDesc.length = pages; + + if (sgt) { + struct scatterlist *sgl; + int pte = 0, idx; + + for_each_sgtable_dma_sg(sgt, sgl, idx) { + for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++) + rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i; + + } + } else { + for (int i = 0; i < pages; i++) + rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; + } + + ret = nvkm_gsp_rpc_wr(gsp, rpc, true); + if (ret) + return ret; + + object->client = device->object.client; + object->parent = &device->object; + object->handle = handle; + return 0; +} + +static int +fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) +{ + NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, + item->addr, item->size, NULL, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) { + ret = PTR_ERR(ctrl); + goto done; + } + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hVidMem = fbsr->hmemory++; + ctrl->vidOffset = 0; + ctrl->sysOffset = fbsr->sys_offset; + ctrl->size = item->size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +done: + nvkm_gsp_rm_free(&memlist); + if (ret) + return ret; + + fbsr->sys_offset += item->size; + return 0; +} + +static int +fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) +{ + NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, + 0, fbsr->size, sgt, &memlist); + if (ret) + return ret; + + ctrl = 
nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->numRegions = fbsr->regions; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hSysMem = fbsr->hmemory++; + ctrl->gspFbAllocsSysOffset = items_size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + nvkm_gsp_rm_free(&memlist); + return 0; +} + +static bool +fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size) +{ + struct fbsr_item *item; + + if (!(item = kzalloc(sizeof(*item), GFP_KERNEL))) + return false; + + item->type = type; + item->addr = addr; + item->size = size; + list_add_tail(&item->head, &fbsr->items); + return true; +} + +static bool +fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) +{ + return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); +} + +static void +r535_instmem_resume(struct nvkm_instmem *imem) +{ + /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */ + if (imem->rm.fbsr_valid) { + nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); + imem->rm.fbsr_valid = false; + } +} + +static int +r535_instmem_suspend(struct nvkm_instmem *imem) +{ + struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_instobj *iobj; + struct fbsr fbsr = {}; + struct fbsr_item *item, *temp; + u64 items_size; + int ret; + + INIT_LIST_HEAD(&fbsr.items); + fbsr.hmemory = 0xcaf00003; + + /* Create a list of all regions we need RM to save during suspend. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) + return -ENOMEM; + } + } + + list_for_each_entry(iobj, &imem->boot, head) { + if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) + return -ENOMEM; + } + + if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) + return -ENOMEM; + + /* Determine memory requirements. */ + list_for_each_entry(item, &fbsr.items, head) { + nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n", + item->addr, item->size, item->type); + fbsr.size += item->size; + fbsr.regions++; + } + + items_size = fbsr.size; + nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size); + + fbsr.size += gsp->fb.rsvd_size; + fbsr.size += gsp->fb.bios.vga_workspace.size; + nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); + + ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); + if (ret) + goto done; + + /* Tell RM about the sysmem which will hold VRAM contents across suspend. */ + ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device); + if (ret) + goto done_sgt; + + ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); + if (WARN_ON(ret)) + goto done_sgt; + + /* Send VRAM regions that need saving. */ + list_for_each_entry(item, &fbsr.items, head) { + ret = fbsr_send(&fbsr, item); + if (WARN_ON(ret)) + goto done_sgt; + } + + imem->rm.fbsr_valid = true; + + /* Cleanup everything except the sysmem backup, which will be removed after resume. */ +done_sgt: + if (ret) /* ... unless we failed already. 
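fbsr_init()/fbsr_send() above lay every saved VRAM region back to back in one system-memory buffer, advancing sysOffset by each region's size, which is why the suspend path first walks the item list to total the sizes and count the regions. The accounting, sketched with made-up regions:

	#include <stdint.h>
	#include <stdio.h>

	struct region { uint64_t addr, size; };

	int main(void)
	{
		/* hypothetical regions; the real list is built by fbsr_inst()/fbsr_vram() */
		const struct region items[] = {
			{ 0xfe000000, 0x100000 },  /* an "inst" object      */
			{ 0xf7900000, 0x5200000 }, /* the "gsp-non-wpr" heap */
		};
		uint64_t total = 0, sys_offset = 0;

		for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
			printf("region %zu: vram %#llx sysOffset %#llx size %#llx\n",
			       i, (unsigned long long)items[i].addr,
			       (unsigned long long)sys_offset,
			       (unsigned long long)items[i].size);
			sys_offset += items[i].size;
			total += items[i].size;
		}
		printf("backup buffer: %#llx bytes\n", (unsigned long long)total);
		return 0;
	}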
*/ + nvkm_gsp_sg_free(device, &imem->rm.fbsr); +done: + list_for_each_entry_safe(item, temp, &fbsr.items, head) { + list_del(&item->head); + kfree(item); + } + + nvkm_gsp_device_dtor(&fbsr.device); + nvkm_gsp_client_dtor(&fbsr.client); + return ret; +} + +static void * +r535_instmem_dtor(struct nvkm_instmem *imem) +{ + kfree(imem->func); + return imem; +} + +int +r535_instmem_new(const struct nvkm_instmem_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pinstmem) +{ + struct nvkm_instmem_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_instmem_dtor; + rm->fini = hw->fini; + rm->suspend = r535_instmem_suspend; + rm->resume = r535_instmem_resume; + rm->memory_new = hw->memory_new; + rm->memory_wrap = hw->memory_wrap; + rm->zero = false; + + ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c index 159d9f8c95..951f01e303 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + static void ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4]) { @@ -53,5 +55,8 @@ int ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c index 265a05fd5f..053302ecb0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil) { @@ -49,5 +51,8 @@ int gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c index 5d28d30d09..65e9f04972 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + static void ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask) { @@ -72,5 +74,8 @@ ga100_mc = { int ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c index eb2ab03f43..05d2fa95e0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + const struct nvkm_intr_data gp100_mc_intrs[] = { { NVKM_ENGINE_DISP , 0, 0, 0x04000000, true }, @@ -98,5 +100,8 @@ gp100_mc = { int gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index a602b0cb5b..7ba35ea59c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -16,6 +16,8 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o nvkm-y += nvkm/subdev/mmu/gv100.o nvkm-y += nvkm/subdev/mmu/tu102.o +nvkm-y += nvkm/subdev/mmu/r535.o + nvkm-y += nvkm/subdev/mmu/mem.o nvkm-y += nvkm/subdev/mmu/memnv04.o nvkm-y += nvkm/subdev/mmu/memnv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index ad3b44a9e0..b67ace7ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -403,6 +403,10 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev) nvkm_mmu_ptc_fini(mmu); mutex_destroy(&mmu->mutex); + + if (mmu->func->dtor) + mmu->func->dtor(mmu); + return mmu; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 5265bf4d83..e9ca653777 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -4,12 +4,16 @@ #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev) #include +int r535_mmu_new(const struct nvkm_mmu_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mmu **); + void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mmu *); int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mmu **); struct nvkm_mmu_func { + void (*dtor)(struct nvkm_mmu *); void (*init)(struct nvkm_mmu *); u8 dma_bits; @@ -37,6 +41,8 @@ struct nvkm_mmu_func { const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); bool kind_sys; + + int (*promote_vmm)(struct nvkm_vmm *); }; extern const struct nvkm_mmu_func nv04_mmu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c new file mode 100644 index 0000000000..d3e95453f2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c @@ -0,0 +1,123 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "vmm.h" + +#include +#include +#include +#include + +static int +r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +{ + NV_VASPACE_ALLOCATION_PARAMETERS *args; + int ret; + + ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp, + &vmm->rm.client, &vmm->rm.device); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, + sizeof(*args), &vmm->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + + ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); + if (ret) + return ret; + + { + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, + &vmm->rm.rsvd); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, + NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->pageSize = 0x20000000; + ctrl->virtAddrLo = vmm->rm.rsvd->addr; + ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; + ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2; + ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; + ctrl->levels[0].size = 0x20; + ctrl->levels[0].aperture = 1; + ctrl->levels[0].pageShift = 0x2f; + ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; + ctrl->levels[1].size = 0x1000; + ctrl->levels[1].aperture = 1; + ctrl->levels[1].pageShift = 0x26; + if (vmm->pd->pde[0]->pde[0]) { + ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; + ctrl->levels[2].size = 0x1000; + ctrl->levels[2].aperture = 1; + ctrl->levels[2].pageShift = 0x1d; + } + + ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); + } + + return ret; +} + +static void +r535_mmu_dtor(struct nvkm_mmu *mmu) +{ + kfree(mmu->func); +} + +int +r535_mmu_new(const struct nvkm_mmu_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) +{ + struct nvkm_mmu_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_mmu_dtor; + rm->dma_bits = hw->dma_bits; + rm->mmu = hw->mmu; + rm->mem = hw->mem; + rm->vmm = hw->vmm; + rm->kind = hw->kind; + rm->kind_sys = hw->kind_sys; + rm->promote_vmm = r535_mmu_promote_vmm; + + ret = nvkm_mmu_new_(rm, device, type, inst, pmmu); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index 8d060ce47f..df662ce4a4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -24,6 +24,7 @@ #include "vmm.h" #include +#include #include @@ -54,5 +55,8 @@ int tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu) { + if (nvkm_gsp_rm(device->gsp)) + return r535_mmu_new(&tu102_mmu, device, type, inst, pmmu); + return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index 8e459d88ff..cf490ff2b9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -572,6 +572,12 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, } uvmm->vmm->managed.raw = raw; + if (mmu->func->promote_vmm) { + ret = mmu->func->promote_vmm(uvmm->vmm); + if (ret) + return ret; 
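r535_mmu_new() above, like r535_instmem_new() earlier and r535_vfn_new() later in this patch, follows one pattern: heap-allocate a mutable copy of the hardware function table, inherit the fields that still apply under GSP-RM, override the rest, and free the copy from a dtor hook. A generic sketch with a toy ops struct (names hypothetical):

	#include <stdlib.h>

	struct ops {
		void (*dtor)(struct ops *);
		int  (*do_thing)(void);
	};

	static void rm_ops_dtor(struct ops *ops)
	{
		free(ops);
	}

	static struct ops *rm_ops_new(const struct ops *hw, int (*rm_do_thing)(void))
	{
		struct ops *rm = calloc(1, sizeof(*rm));

		if (!rm)
			return NULL;
		*rm = *hw;                  /* inherit the hardware defaults */
		rm->dtor = rm_ops_dtor;     /* so the copy gets freed        */
		rm->do_thing = rm_do_thing; /* RM-specific override          */
		return rm;
	}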
+ } + page = uvmm->vmm->func->page; args->v0.page_nr = 0; while (page && (page++)->shift) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index eb5fcadcb3..9c97800fe0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -1030,6 +1030,13 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm) struct nvkm_vma *vma; struct rb_node *node; + if (vmm->rm.client.gsp) { + nvkm_gsp_rm_free(&vmm->rm.object); + nvkm_gsp_device_dtor(&vmm->rm.device); + nvkm_gsp_client_dtor(&vmm->rm.client); + nvkm_vmm_put(vmm, &vmm->rm.rsvd); + } + if (0) nvkm_vmm_dump(vmm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index f3630d0e0d..bddac77f48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -558,7 +558,7 @@ gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr) void gp100_vmm_flush(struct nvkm_vmm *vmm, int depth) { - u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; + u32 type = 0; if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) type |= 0x00000004; /* HUB_ONLY */ type |= 0x00000001; /* PAGE_ALL */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c index 5a08458fe1..8379e72d77 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c @@ -27,7 +27,7 @@ static void tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) { struct nvkm_device *device = vmm->mmu->subdev.device; - u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; + u32 type = 0; type |= 0x00000001; /* PAGE_ALL */ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) @@ -35,9 +35,11 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) mutex_lock(&vmm->mmu->mutex); - nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); + if (!vmm->rm.bar2_pdb) + nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); + else + nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8); nvkm_wr32(device, 0xb830a4, 0x00000000); - nvkm_wr32(device, 0x100e68, 0x00000000); nvkm_wr32(device, 0xb830b0, 0x80000000 | type); nvkm_msec(device, 2000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index cd31483609..da5b2b2190 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + static const struct nvkm_falcon_func gp102_pmu_flcn = { .disable = gm200_flcn_disable, @@ -54,5 +56,8 @@ int gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c index b4eaf6db36..b4530073bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + static const struct nvkm_subdev_func gm200_privring = { .intr = gk104_privring_intr, @@ -32,5 +34,8 @@ int gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_subdev **pprivring) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_subdev_new_(&gm200_privring, 
device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c index 44f021392b..5392833d36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + static int gp100_temp_get(struct nvkm_therm *therm) { @@ -52,5 +54,8 @@ int gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c index 84790cf52b..129eabb8b9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + static int ga100_top_parse(struct nvkm_top *top) { @@ -76,7 +78,7 @@ ga100_top_parse(struct nvkm_top *top) case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break; case 0x00000013: I_(NVKM_ENGINE_CE , inst); break; case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break; - case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break; + case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break; case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break; case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break; break; @@ -104,5 +106,8 @@ int ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_top_new_(&ga100_top, device, type, inst, ptop); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index 2bbba8244c..da55dac8c2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -23,6 +23,8 @@ */ #include "priv.h" +#include + static int gk104_top_parse(struct nvkm_top *top) { @@ -89,7 +91,7 @@ gk104_top_parse(struct nvkm_top *top) case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break; case 0x00000013: I_(NVKM_ENGINE_CE , inst); break; case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break; - case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break; + case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break; default: break; } @@ -115,5 +117,8 @@ int gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop) { + if (nvkm_gsp_rm(device->gsp)) + return -ENODEV; + return nvkm_top_new_(&gk104_top, device, type, inst, ptop); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild index 23cd21b40a..23a8546061 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild @@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/vfn/uvfn.o nvkm-y += nvkm/subdev/vfn/gv100.o nvkm-y += nvkm/subdev/vfn/tu102.o nvkm-y += nvkm/subdev/vfn/ga100.o + +nvkm-y += nvkm/subdev/vfn/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c index fd5c693132..bb0bb6fda5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + #include static const struct nvkm_intr_data @@ -43,5 +45,8 @@ int ga100_vfn_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn) { + if 
(nvkm_gsp_rm(device->gsp)) + return r535_vfn_new(&ga100_vfn, device, type, inst, 0xb80000, pvfn); + return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h index 96d53c0204..3a09781ad0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h @@ -5,16 +5,21 @@ #include struct nvkm_vfn_func { + void (*dtor)(struct nvkm_vfn *); + const struct nvkm_intr_func *intr; const struct nvkm_intr_data *intrs; struct { u32 addr; u32 size; - const struct nvkm_sclass base; + struct nvkm_sclass base; } user; }; +int r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int, + u32 addr, struct nvkm_vfn **); + int nvkm_vfn_new_(const struct nvkm_vfn_func *, struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_vfn **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c new file mode 100644 index 0000000000..dce337306c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c @@ -0,0 +1,50 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static void +r535_vfn_dtor(struct nvkm_vfn *vfn) +{ + kfree(vfn->func); +} + +int +r535_vfn_new(const struct nvkm_vfn_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr, + struct nvkm_vfn **pvfn) +{ + struct nvkm_vfn_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_vfn_dtor; + rm->intr = hw->intr; + rm->user = hw->user; + + ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c index 3d063fb5e1..a3bf13c5c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + #include static void @@ -104,5 +106,8 @@ int tu102_vfn_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn) { + if (nvkm_gsp_rm(device->gsp)) + return r535_vfn_new(&tu102_vfn, device, type, inst, 0xb80000, pvfn); + return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn); } -- cgit v1.2.3
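In sum, every subdev constructor touched by this series gates on whether GSP-RM owns the device: some hand off to an r535_* wrapper (mmu, vfn, instmem), others return -ENODEV because RM now drives that block directly (acr, i2c, ltc, mc, pmu, privring, therm, top). A condensed sketch of the two shapes (toy names, no nvkm types):

	#include <errno.h>
	#include <stdbool.h>

	static bool gsp_rm_active; /* stand-in for nvkm_gsp_rm(device->gsp) */

	/* blocks RM replaces outright refuse to probe */
	static int subdev_new_disabled(void)
	{
		if (gsp_rm_active)
			return -ENODEV;
		return 0; /* construct the hw implementation */
	}

	/* blocks nouveau still needs are routed to an r535 wrapper */
	static int subdev_new_wrapped(void)
	{
		if (gsp_rm_active)
			return 0; /* r535_*_new(): clone and override hw ops */
		return 0;         /* nvkm_*_new_(): plain hw path */
	}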