Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/falcon')
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild  |  12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/base.c  | 322
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c  | 214
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/fw.c    | 354
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c |  62
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c | 148
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c | 357
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c |  82
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c  | 213
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/priv.h  |  13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c  |  87
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h  |  88
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/v1.c    | 101
13 files changed, 2053 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
new file mode 100644
index 0000000000..9ffe7b921c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/fw.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
+nvkm-y += nvkm/falcon/v1.o
+
+nvkm-y += nvkm/falcon/gm200.o
+nvkm-y += nvkm/falcon/gp102.o
+nvkm-y += nvkm/falcon/ga100.o
+nvkm-y += nvkm/falcon/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
new file mode 100644
index 0000000000..235149f73a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+static const struct nvkm_falcon_func_dma *
+nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
+{
+ switch (*mem_type) {
+ case IMEM: return falcon->func->imem_dma;
+ case DMEM: return falcon->func->dmem_dma;
+ default:
+ return NULL;
+ }
+}
+
+int
+nvkm_falcon_dma_wr(struct nvkm_falcon *falcon, const u8 *img, u64 dma_addr, u32 dma_base,
+ enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec)
+{
+ const struct nvkm_falcon_func_dma *dma = nvkm_falcon_dma(falcon, &mem_type, &mem_base);
+ const char *type = nvkm_falcon_mem(mem_type);
+ const int dmalen = 256;
+ u32 dma_start = 0;
+ u32 dst, src, cmd;
+ int ret, i;
+
+ if (WARN_ON(!dma || !dma->xfer))
+ return -EINVAL;
+
+ if (mem_type == DMEM) {
+ dma_start = dma_base;
+ dma_addr += dma_base;
+ }
+
+ FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x (%010llx %08x)",
+ type, mem_base, len, dma_base, dma_addr - dma_base, dma_start);
+ if (WARN_ON(!len || (len & (dmalen - 1))))
+ return -EINVAL;
+
+ ret = dma->init(falcon, dma_addr, dmalen, mem_type, sec, &cmd);
+ if (ret)
+ return ret;
+
+ dst = mem_base;
+ src = dma_base;
+ if (len) {
+ while (len >= dmalen) {
+ dma->xfer(falcon, dst, src - dma_start, cmd);
+
+ if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
+ for (i = 0; i < dmalen; i += 4, mem_base += 4) {
+ const int w = 8, x = (i / 4) % w;
+
+ if (x == 0)
+ printk(KERN_INFO "%s %08x <-", type, mem_base);
+ printk(KERN_CONT " %08x", *(u32 *)(img + src + i));
+ if (x == (w - 1) || ((i + 4) == dmalen))
+ printk(KERN_CONT " <- %08x+%08x", dma_base,
+ src + i - dma_base - (x * 4));
+ if (i == (7 * 4))
+ printk(KERN_CONT " *");
+ }
+ }
+
+ if (nvkm_msec(falcon->owner->device, 2000,
+ if (dma->done(falcon))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ src += dmalen;
+ dst += dmalen;
+ len -= dmalen;
+ }
+ WARN_ON(len);
+ }
+
+ return 0;
+}
+
+static const struct nvkm_falcon_func_pio *
+nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
+{
+ switch (*mem_type) {
+ case IMEM:
+ return falcon->func->imem_pio;
+ case DMEM:
+ if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
+ return falcon->func->dmem_pio;
+
+ *mem_base -= falcon->func->emem_addr;
+ fallthrough;
+ case EMEM:
+ return falcon->func->emem_pio;
+ default:
+ return NULL;
+ }
+}
+
+int
+nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base,
+ const u8 *img, u32 img_base, int len)
+{
+ const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
+ const char *type = nvkm_falcon_mem(mem_type);
+ int xfer_len;
+
+ if (WARN_ON(!pio || !pio->rd))
+ return -EINVAL;
+
+ FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base);
+ if (WARN_ON(!len || (len & (pio->min - 1))))
+ return -EINVAL;
+
+ pio->rd_init(falcon, port, mem_base);
+ do {
+ xfer_len = min(len, pio->max);
+ pio->rd(falcon, port, img, xfer_len);
+
+ if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
+ for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
+ if (((img_base / 4) % 8) == 0)
+ printk(KERN_INFO "%s %08x ->", type, mem_base);
+ printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
+ }
+ }
+
+ img += xfer_len;
+ len -= xfer_len;
+ } while (len);
+
+ return 0;
+}
+
+int
+nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
+ enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
+{
+ const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
+ const char *type = nvkm_falcon_mem(mem_type);
+ int xfer_len;
+
+ if (WARN_ON(!pio || !pio->wr))
+ return -EINVAL;
+
+ FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
+ if (WARN_ON(!len || (len & (pio->min - 1))))
+ return -EINVAL;
+
+ pio->wr_init(falcon, port, sec, mem_base);
+ do {
+ xfer_len = min(len, pio->max);
+ pio->wr(falcon, port, img, xfer_len, tag++);
+
+ if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
+ for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
+ if (((img_base / 4) % 8) == 0)
+ printk(KERN_INFO "%s %08x <-", type, mem_base);
+ printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
+ if ((img_base / 4) == 7 && mem_type == IMEM)
+ printk(KERN_CONT " %04x", tag - 1);
+ }
+ }
+
+ img += xfer_len;
+ len -= xfer_len;
+ } while (len);
+
+ return 0;
+}
+
+void
+nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ if (secure && !falcon->secret) {
+ nvkm_warn(falcon->user,
+ "writing with secure tag on a non-secure falcon!\n");
+ return;
+ }
+
+ falcon->func->load_imem(falcon, data, start, size, tag, port,
+ secure);
+}
+
+void
+nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ mutex_lock(&falcon->dmem_mutex);
+
+ falcon->func->load_dmem(falcon, data, start, size, port);
+
+ mutex_unlock(&falcon->dmem_mutex);
+}
+
+void
+nvkm_falcon_start(struct nvkm_falcon *falcon)
+{
+ falcon->func->start(falcon);
+}
+
+int
+nvkm_falcon_reset(struct nvkm_falcon *falcon)
+{
+ int ret;
+
+ ret = falcon->func->disable(falcon);
+ if (WARN_ON(ret))
+ return ret;
+
+ return nvkm_falcon_enable(falcon);
+}
+
+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 reg;
+
+ if (!falcon->addr) {
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
+ if (WARN_ON(!falcon->addr))
+ return -ENODEV;
+ }
+
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ if (func->debug) {
+ u32 val = nvkm_falcon_rd32(falcon, func->debug);
+ falcon->debug = (val >> 20) & 0x1;
+ }
+
+ return 0;
+}
+
+void
+nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
+{
+ if (unlikely(!falcon))
+ return;
+
+ mutex_lock(&falcon->mutex);
+ if (falcon->user == user) {
+ nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
+ falcon->user = NULL;
+ }
+ mutex_unlock(&falcon->mutex);
+}
+
+int
+nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
+{
+ int ret = 0;
+
+ mutex_lock(&falcon->mutex);
+ if (falcon->user) {
+ nvkm_error(user, "%s falcon already acquired by %s!\n",
+ falcon->name, falcon->user->name);
+ mutex_unlock(&falcon->mutex);
+ return -EBUSY;
+ }
+
+ nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ if (!falcon->oneinit)
+ ret = nvkm_falcon_oneinit(falcon);
+ falcon->user = user;
+ mutex_unlock(&falcon->mutex);
+ return ret;
+}
+
+void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
+nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
+ struct nvkm_subdev *subdev, const char *name, u32 addr,
+ struct nvkm_falcon *falcon)
+{
+ falcon->func = func;
+ falcon->owner = subdev;
+ falcon->name = name;
+ falcon->addr = addr;
+ falcon->addr2 = func->addr2;
+ mutex_init(&falcon->mutex);
+ mutex_init(&falcon->dmem_mutex);
+ return 0;
+}
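
[Editor's note] The helpers above follow a strict acquire/use/release discipline around falcon->mutex. A minimal caller sketch, assuming this commit's "priv.h" context; example_run_payload() and its payload are hypothetical, and len must respect the target pio->min alignment:

	/* Illustration only: claim the falcon, reset it, PIO a payload
	 * into DMEM port 0 at offset 0, then release it.
	 */
	static int
	example_run_payload(struct nvkm_falcon *falcon, struct nvkm_subdev *user,
			    const u8 *img, int len)
	{
		int ret = nvkm_falcon_get(falcon, user);

		if (ret)
			return ret;

		ret = nvkm_falcon_reset(falcon);
		if (ret == 0)
			ret = nvkm_falcon_pio_wr(falcon, img, 0, 0, DMEM, 0,
						 len, 0, false);

		nvkm_falcon_put(falcon, user);
		return ret;
	}
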
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 0000000000..211ebe7afa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
+ u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+ u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+ u32 free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ if (head >= tail) {
+ free = cmdq->offset + cmdq->size - head;
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = cmdq->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false);
+ cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+ struct nvfw_falcon_cmd cmd;
+
+ cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+ cmd.size = sizeof(cmd);
+ nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+ cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ bool rewind = false;
+
+ mutex_lock(&cmdq->mutex);
+
+ if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+ FLCNQ_DBG(cmdq, "queue full");
+ mutex_unlock(&cmdq->mutex);
+ return -EAGAIN;
+ }
+
+ cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+ if (rewind)
+ nvkm_falcon_cmdq_rewind(cmdq);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+ nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+ mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
+{
+ static unsigned timeout = 2000;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+ if (ret) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+ return ret;
+ }
+
+ nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+ nvkm_falcon_cmdq_close(cmdq);
+ return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
+ nvkm_falcon_qmgr_callback cb, void *priv,
+ unsigned long timeout)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+ int ret;
+
+ if (!wait_for_completion_timeout(&cmdq->ready,
+ msecs_to_jiffies(1000))) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+ return -ETIMEDOUT;
+ }
+
+ seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->state = SEQ_STATE_USED;
+ seq->async = !timeout;
+ seq->callback = cb;
+ seq->priv = priv;
+
+ ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ return ret;
+ }
+
+ if (!seq->async) {
+ if (!wait_for_completion_timeout(&seq->done, timeout)) {
+ FLCNQ_ERR(cmdq, "timeout waiting for reply");
+ return -ETIMEDOUT;
+ }
+ ret = seq->result;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ }
+
+ return ret;
+}
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+ reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+ cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+ cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+ cmdq->offset = offset;
+ cmdq->size = size;
+ complete_all(&cmdq->ready);
+
+ FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+ if (cmdq) {
+ kfree(*pcmdq);
+ *pcmdq = NULL;
+ }
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+
+ if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cmdq->qmgr = qmgr;
+ cmdq->name = name;
+ mutex_init(&cmdq->mutex);
+ init_completion(&cmdq->ready);
+ return 0;
+}
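
[Editor's note] A hedged usage sketch of the command-queue API above; the unit ID and the synchronous one-second timeout are assumptions for illustration, not values from this commit:

	/* Illustration only, assuming this commit's "qmgr.h". */
	static int
	example_reply(void *priv, struct nvfw_falcon_msg *hdr)
	{
		/* Runs from the msgq path when the matching reply arrives. */
		return 0;
	}

	static int
	example_send(struct nvkm_falcon_cmdq *cmdq)
	{
		struct nvfw_falcon_cmd cmd = {
			.unit_id = 0x07,	/* hypothetical unit */
			.size = sizeof(cmd),
		};

		/* Non-zero timeout -> synchronous: waits on seq->done. */
		return nvkm_falcon_cmdq_send(cmdq, &cmd, example_reply, NULL,
					     msecs_to_jiffies(1000));
	}
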
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
new file mode 100644
index 0000000000..80a480b121
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+nvkm_falcon_fw_patch(struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ u32 sig_base_src = fw->sig_base_prd;
+ u32 src, dst, len, i;
+ int idx = 0;
+
+ FLCNFW_DBG(fw, "patching sigs:%d size:%d", fw->sig_nr, fw->sig_size);
+ if (fw->func->signature) {
+ idx = fw->func->signature(fw, &sig_base_src);
+ if (idx < 0)
+ return idx;
+ }
+
+ src = idx * fw->sig_size;
+ dst = fw->sig_base_img;
+ len = fw->sig_size / 4;
+ FLCNFW_DBG(fw, "patch idx:%d src:%08x dst:%08x", idx, sig_base_src + src, dst);
+ for (i = 0; i < len; i++) {
+ u32 sig = *(u32 *)(fw->sigs + src);
+
+ if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
+ if (i % 8 == 0)
+ printk(KERN_INFO "sig -> %08x:", dst);
+ printk(KERN_CONT " %08x", sig);
+ }
+
+ *(u32 *)(fw->fw.img + dst) = sig;
+ src += 4;
+ dst += 4;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_falcon_fw_dtor_sigs(struct nvkm_falcon_fw *fw)
+{
+ kfree(fw->sigs);
+ fw->sigs = NULL;
+}
+
+int
+nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
+ bool release, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ int ret;
+
+ ret = nvkm_falcon_get(falcon, user);
+ if (ret)
+ return ret;
+
+ if (fw->sigs) {
+ ret = nvkm_falcon_fw_patch(fw);
+ if (ret)
+ goto done;
+
+ nvkm_falcon_fw_dtor_sigs(fw);
+ }
+
+ FLCNFW_DBG(fw, "resetting");
+ fw->func->reset(fw);
+
+ FLCNFW_DBG(fw, "loading");
+ if (fw->func->setup) {
+ ret = fw->func->setup(fw);
+ if (ret)
+ goto done;
+ }
+
+ ret = fw->func->load(fw);
+ if (ret)
+ goto done;
+
+ FLCNFW_DBG(fw, "booting");
+ ret = fw->func->boot(fw, pmbox0, pmbox1, mbox0_ok, irqsclr);
+ if (ret)
+ FLCNFW_ERR(fw, "boot failed: %d", ret);
+ else
+ FLCNFW_DBG(fw, "booted");
+
+done:
+ if (ret || release)
+ nvkm_falcon_put(falcon, user);
+ return ret;
+}
+
+int
+nvkm_falcon_fw_oneinit(struct nvkm_falcon_fw *fw, struct nvkm_falcon *falcon,
+ struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ int ret;
+
+ fw->falcon = falcon;
+ fw->vmm = nvkm_vmm_ref(vmm);
+ fw->inst = nvkm_memory_ref(inst);
+
+ if (fw->boot) {
+ FLCN_DBG(falcon, "mapping %s fw", fw->fw.name);
+ ret = nvkm_vmm_get(fw->vmm, 12, nvkm_memory_size(&fw->fw.mem.memory), &fw->vma);
+ if (ret) {
+ FLCN_ERR(falcon, "get %d", ret);
+ return ret;
+ }
+
+ ret = nvkm_memory_map(&fw->fw.mem.memory, 0, fw->vmm, fw->vma, NULL, 0);
+ if (ret) {
+ FLCN_ERR(falcon, "map %d", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void
+nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
+{
+ nvkm_vmm_put(fw->vmm, &fw->vma);
+ nvkm_vmm_unref(&fw->vmm);
+ nvkm_memory_unref(&fw->inst);
+ nvkm_falcon_fw_dtor_sigs(fw);
+ nvkm_firmware_dtor(&fw->fw);
+}
+
+static const struct nvkm_firmware_func
+nvkm_falcon_fw_dma = {
+ .type = NVKM_FIRMWARE_IMG_DMA,
+};
+
+static const struct nvkm_firmware_func
+nvkm_falcon_fw = {
+ .type = NVKM_FIRMWARE_IMG_RAM,
+};
+
+int
+nvkm_falcon_fw_sign(struct nvkm_falcon_fw *fw, u32 sig_base_img, u32 sig_size, const u8 *sigs,
+ int sig_nr_prd, u32 sig_base_prd, int sig_nr_dbg, u32 sig_base_dbg)
+{
+ fw->sig_base_prd = sig_base_prd;
+ fw->sig_base_dbg = sig_base_dbg;
+ fw->sig_base_img = sig_base_img;
+ fw->sig_size = sig_size;
+ fw->sig_nr = sig_nr_prd + sig_nr_dbg;
+
+ fw->sigs = kmalloc_array(fw->sig_nr, fw->sig_size, GFP_KERNEL);
+ if (!fw->sigs)
+ return -ENOMEM;
+
+ memcpy(fw->sigs, sigs + sig_base_prd, sig_nr_prd * fw->sig_size);
+ if (sig_nr_dbg)
+ memcpy(fw->sigs + sig_size, sigs + sig_base_dbg, sig_nr_dbg * fw->sig_size);
+
+ return 0;
+}
+
+int
+nvkm_falcon_fw_ctor(const struct nvkm_falcon_fw_func *func, const char *name,
+ struct nvkm_device *device, bool dma, const void *src, u32 len,
+ struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+ const struct nvkm_firmware_func *type = dma ? &nvkm_falcon_fw_dma : &nvkm_falcon_fw;
+ int ret;
+
+ fw->func = func;
+
+ ret = nvkm_firmware_ctor(type, name, device, src, len, &fw->fw);
+ if (ret)
+ return ret;
+
+ return falcon ? nvkm_falcon_fw_oneinit(fw, falcon, NULL, NULL) : 0;
+}
+
+int
+nvkm_falcon_fw_ctor_hs(const struct nvkm_falcon_fw_func *func, const char *name,
+ struct nvkm_subdev *subdev, const char *bl, const char *img, int ver,
+ struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+ const struct firmware *blob;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header *hshdr;
+ const struct nvfw_hs_load_header *lhdr;
+ const struct nvfw_bl_desc *desc;
+ u32 loc, sig;
+ int ret;
+
+ ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, blob->data);
+ hshdr = nvfw_hs_header(subdev, blob->data + hdr->header_offset);
+
+ ret = nvkm_falcon_fw_ctor(func, name, subdev->device, bl != NULL,
+ blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+ if (ret)
+ goto done;
+
+ /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+ * standard format, and don't have the indirection seen in the 0x10de
+ * case.
+ */
+ switch (hdr->bin_magic) {
+ case 0x000010de:
+ loc = *(u32 *)(blob->data + hshdr->patch_loc);
+ sig = *(u32 *)(blob->data + hshdr->patch_sig);
+ break;
+ case 0x3b1d14f0:
+ loc = hshdr->patch_loc;
+ sig = hshdr->patch_sig;
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size, blob->data,
+ 1, hshdr->sig_prod_offset + sig,
+ 1, hshdr->sig_dbg_offset + sig);
+ if (ret)
+ goto done;
+
+ lhdr = nvfw_hs_load_header(subdev, blob->data + hshdr->hdr_offset);
+
+ fw->nmem_base_img = 0;
+ fw->nmem_base = lhdr->non_sec_code_off;
+ fw->nmem_size = lhdr->non_sec_code_size;
+
+ fw->imem_base_img = lhdr->apps[0];
+ fw->imem_base = ALIGN(lhdr->apps[0], 0x100);
+ fw->imem_size = lhdr->apps[lhdr->num_apps + 0];
+
+ fw->dmem_base_img = lhdr->data_dma_base;
+ fw->dmem_base = 0;
+ fw->dmem_size = lhdr->data_size;
+ fw->dmem_sign = loc - lhdr->data_dma_base;
+
+ if (bl) {
+ nvkm_firmware_put(blob);
+
+ ret = nvkm_firmware_load_name(subdev, bl, "", ver, &blob);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, blob->data);
+ desc = nvfw_bl_desc(subdev, blob->data + hdr->header_offset);
+
+ fw->boot_addr = desc->start_tag << 8;
+ fw->boot_size = desc->code_size;
+ fw->boot = kmemdup(blob->data + hdr->data_offset + desc->code_off,
+ fw->boot_size, GFP_KERNEL);
+ if (!fw->boot)
+ ret = -ENOMEM;
+ } else {
+ fw->boot_addr = fw->nmem_base;
+ }
+
+done:
+ if (ret)
+ nvkm_falcon_fw_dtor(fw);
+
+ nvkm_firmware_put(blob);
+ return ret;
+}
+
+int
+nvkm_falcon_fw_ctor_hs_v2(const struct nvkm_falcon_fw_func *func, const char *name,
+ struct nvkm_subdev *subdev, const char *img, int ver,
+ struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header_v2 *hshdr;
+ const struct nvfw_hs_load_header_v2 *lhdr;
+ const struct firmware *blob;
+ u32 loc, sig, cnt, *meta;
+ int ret;
+
+ ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, blob->data);
+ hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+ meta = (u32 *)(blob->data + hshdr->meta_data_offset);
+ loc = *(u32 *)(blob->data + hshdr->patch_loc);
+ sig = *(u32 *)(blob->data + hshdr->patch_sig);
+ cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+ ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+ blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+ cnt, hshdr->sig_prod_offset + sig, 0, 0);
+ if (ret)
+ goto done;
+
+ lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+ fw->imem_base_img = lhdr->app[0].offset;
+ fw->imem_base = 0;
+ fw->imem_size = lhdr->app[0].size;
+
+ fw->dmem_base_img = lhdr->os_data_offset;
+ fw->dmem_base = 0;
+ fw->dmem_size = lhdr->os_data_size;
+ fw->dmem_sign = loc - lhdr->os_data_offset;
+
+ fw->boot_addr = lhdr->app[0].offset;
+
+ fw->fuse_ver = meta[0];
+ fw->engine_id = meta[1];
+ fw->ucode_id = meta[2];
+
+done:
+ if (ret)
+ nvkm_falcon_fw_dtor(fw);
+
+ nvkm_firmware_put(blob);
+ return ret;
+}
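
[Editor's note] A lifecycle sketch for the firmware object above; the image name "acr/ucode_example" is a placeholder, and gm200_flcn_fw is the function table defined in gm200.c later in this patch:

	/* Illustration only: construct a heavy-secure image by name, boot
	 * it synchronously expecting mbox0 == 0, then tear it down.
	 */
	static int
	example_boot_hs(struct nvkm_subdev *subdev, struct nvkm_falcon *falcon)
	{
		struct nvkm_falcon_fw fw = {};
		u32 mbox0 = 0;
		int ret;

		ret = nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "example-hs", subdev,
					     NULL, "acr/ucode_example", 0,
					     falcon, &fw);
		if (ret)
			return ret;

		ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
		nvkm_falcon_fw_dtor(&fw);
		return ret;
	}
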
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
new file mode 100644
index 0000000000..49fd329439
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+int
+ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ struct nvkm_device *device = falcon->owner->device;
+ u32 reg_fuse_version;
+ int idx;
+
+ FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
+ FLCN_DBG(falcon, "fuse_version: %d", fw->fuse_ver);
+
+ if (fw->engine_id & 0x00000001) {
+ reg_fuse_version = nvkm_rd32(device, 0x824140 + (fw->ucode_id - 1) * 4);
+ } else
+ if (fw->engine_id & 0x00000004) {
+ reg_fuse_version = nvkm_rd32(device, 0x824100 + (fw->ucode_id - 1) * 4);
+ } else
+ if (fw->engine_id & 0x00000400) {
+ reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
+ } else {
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
+ if (reg_fuse_version) {
+ reg_fuse_version = fls(reg_fuse_version);
+ FLCN_DBG(falcon, "reg_fuse_version: %d", reg_fuse_version);
+
+ if (WARN_ON(fw->fuse_ver < reg_fuse_version))
+ return -EINVAL;
+
+ idx = fw->fuse_ver - reg_fuse_version;
+ } else {
+ idx = fw->sig_nr - 1;
+ }
+
+ return idx;
+}
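
[Editor's note] Worked example of the index selection above (values assumed): if the per-ucode fuse register reads 0x2, fls() yields 2, and a firmware built with fuse_ver 3 boots with signature index 3 - 2 = 1. With no fuse bits blown, the final entry, fw->sig_nr - 1, is used.
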
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
new file mode 100644
index 0000000000..0ff450fe35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+
+static bool
+ga102_flcn_dma_done(struct nvkm_falcon *falcon)
+{
+ return !!(nvkm_falcon_rd32(falcon, 0x118) & 0x00000002);
+}
+
+static void
+ga102_flcn_dma_xfer(struct nvkm_falcon *falcon, u32 mem_base, u32 dma_base, u32 cmd)
+{
+ nvkm_falcon_wr32(falcon, 0x114, mem_base);
+ nvkm_falcon_wr32(falcon, 0x11c, dma_base);
+ nvkm_falcon_wr32(falcon, 0x118, cmd);
+}
+
+static int
+ga102_flcn_dma_init(struct nvkm_falcon *falcon, u64 dma_addr, int xfer_len,
+ enum nvkm_falcon_mem mem_type, bool sec, u32 *cmd)
+{
+ *cmd = (ilog2(xfer_len) - 2) << 8;
+ if (mem_type == IMEM)
+ *cmd |= 0x00000010;
+ if (sec)
+ *cmd |= 0x00000004;
+
+ nvkm_falcon_wr32(falcon, 0x110, dma_addr >> 8);
+ nvkm_falcon_wr32(falcon, 0x128, 0x00000000);
+ return 0;
+}
+
+const struct nvkm_falcon_func_dma
+ga102_flcn_dma = {
+ .init = ga102_flcn_dma_init,
+ .xfer = ga102_flcn_dma_xfer,
+ .done = ga102_flcn_dma_done,
+};
+
+int
+ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);
+
+ if (nvkm_msec(falcon->owner->device, 20,
+ if (!(nvkm_falcon_rd32(falcon, 0x0f4) & 0x00001000))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int
+ga102_flcn_reset_prep(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_rd32(falcon, 0x0f4);
+
+ nvkm_usec(falcon->owner->device, 150,
+ if (nvkm_falcon_rd32(falcon, 0x0f4) & 0x80000000)
+ break;
+ _warn = false;
+ );
+
+ return 0;
+}
+
+int
+ga102_flcn_select(struct nvkm_falcon *falcon)
+{
+ if ((nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000010) != 0x00000000) {
+ nvkm_falcon_wr32(falcon, falcon->addr2 + 0x668, 0x00000000);
+ if (nvkm_msec(falcon->owner->device, 10,
+ if (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000001)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int
+ga102_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *mbox0, u32 *mbox1, u32 mbox0_ok, u32 irqsclr)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+
+ nvkm_falcon_wr32(falcon, falcon->addr2 + 0x210, fw->dmem_sign);
+ nvkm_falcon_wr32(falcon, falcon->addr2 + 0x19c, fw->engine_id);
+ nvkm_falcon_wr32(falcon, falcon->addr2 + 0x198, fw->ucode_id);
+ nvkm_falcon_wr32(falcon, falcon->addr2 + 0x180, 0x00000001);
+
+ return gm200_flcn_fw_boot(fw, mbox0, mbox1, mbox0_ok, irqsclr);
+}
+
+int
+ga102_flcn_fw_load(struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ int ret = 0;
+
+ nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
+ nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
+ nvkm_falcon_mask(falcon, 0x600, 0x00010007, (0 << 16) | (1 << 2) | 1);
+
+ ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->imem_base_img,
+ IMEM, fw->imem_base, fw->imem_size, true);
+ if (ret)
+ return ret;
+
+ ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->dmem_base_img,
+ DMEM, fw->dmem_base, fw->dmem_size, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+const struct nvkm_falcon_fw_func
+ga102_flcn_fw = {
+ .signature = ga100_flcn_fw_signature,
+ .reset = gm200_flcn_fw_reset,
+ .load = ga102_flcn_fw_load,
+ .boot = ga102_flcn_fw_boot,
+};
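
[Editor's note] On the DMA command encoding in ga102_flcn_dma_init(): for the fixed 256-byte transfers issued by nvkm_falcon_dma_wr(), ilog2(256) - 2 = 6, so the size field shifted into bits 10:8 yields 0x600; OR-ing 0x10 selects IMEM and 0x4 requests a secure transfer, giving 0x614 for a secure IMEM copy. This reading is inferred from the code above, not from documentation.
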
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
new file mode 100644
index 0000000000..b7da3ab44c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+
+void
+gm200_flcn_tracepc(struct nvkm_falcon *falcon)
+{
+ u32 sctl = nvkm_falcon_rd32(falcon, 0x240);
+ u32 tidx = nvkm_falcon_rd32(falcon, 0x148);
+ int nr = (tidx & 0x00ff0000) >> 16, sp, ip;
+
+ FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx);
+ for (sp = 0; sp < nr; sp++) {
+ nvkm_falcon_wr32(falcon, 0x148, sp);
+ ip = nvkm_falcon_rd32(falcon, 0x14c);
+ FLCN_ERR(falcon, "TRACEPC: %08x", ip);
+ }
+}
+
+static void
+gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
+{
+ while (len >= 4) {
+ *(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+ img += 4;
+ len -= 4;
+ }
+
+ /* Sigh. Tegra PMU FW's init message... */
+ if (len) {
+ u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ while (len--) {
+ *(u8 *)img++ = data & 0xff;
+ data >>= 8;
+ }
+ }
+}
+
+static void
+gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
+{
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
+}
+
+static void
+gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
+{
+ while (len >= 4) {
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
+ img += 4;
+ len -= 4;
+ }
+
+ WARN_ON(len);
+}
+
+static void
+gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
+{
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
+}
+
+const struct nvkm_falcon_func_pio
+gm200_flcn_dmem_pio = {
+ .min = 1,
+ .max = 0x100,
+ .wr_init = gm200_flcn_pio_dmem_wr_init,
+ .wr = gm200_flcn_pio_dmem_wr,
+ .rd_init = gm200_flcn_pio_dmem_rd_init,
+ .rd = gm200_flcn_pio_dmem_rd,
+};
+
+static void
+gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
+{
+ nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
+}
+
+static void
+gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
+{
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
+ while (len >= 4) {
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
+ img += 4;
+ len -= 4;
+ }
+}
+
+const struct nvkm_falcon_func_pio
+gm200_flcn_imem_pio = {
+ .min = 0x100,
+ .max = 0x100,
+ .wr_init = gm200_flcn_pio_imem_wr_init,
+ .wr = gm200_flcn_pio_imem_wr,
+};
+
+int
+gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
+{
+ if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
+ return -1;
+
+ return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
+}
+
+void
+gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
+{
+ nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
+ nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
+ nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
+ nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
+}
+
+int
+gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);
+
+ if (nvkm_msec(falcon->owner->device, 10,
+ if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int
+gm200_flcn_enable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ if (falcon->func->reset_eng) {
+ ret = falcon->func->reset_eng(falcon);
+ if (ret)
+ return ret;
+ }
+
+ if (falcon->func->select) {
+ ret = falcon->func->select(falcon);
+ if (ret)
+ return ret;
+ }
+
+ if (falcon->func->reset_pmc)
+ nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
+
+ ret = falcon->func->reset_wait_mem_scrubbing(falcon);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
+ return 0;
+}
+
+int
+gm200_flcn_disable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ if (falcon->func->select) {
+ ret = falcon->func->select(falcon);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
+ nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);
+
+ if (falcon->func->reset_pmc) {
+ if (falcon->func->reset_prep) {
+ ret = falcon->func->reset_prep(falcon);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
+ }
+
+ if (falcon->func->reset_eng) {
+ ret = falcon->func->reset_eng(falcon);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ u32 mbox0, mbox1;
+ int ret = 0;
+
+ nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
+ if (pmbox1)
+ nvkm_falcon_wr32(falcon, 0x044, *pmbox1);
+
+ nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
+ nvkm_falcon_wr32(falcon, 0x100, 0x00000002);
+
+ if (nvkm_msec(falcon->owner->device, 2000,
+ if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
+ break;
+ ) < 0)
+ ret = -ETIMEDOUT;
+
+ mbox0 = nvkm_falcon_rd32(falcon, 0x040);
+ mbox1 = nvkm_falcon_rd32(falcon, 0x044);
+ if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
+ ret = ret ?: -EIO;
+
+ if (irqsclr)
+ nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);
+
+ return ret;
+}
+
+int
+gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ int target, ret;
+
+ if (fw->inst) {
+ nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
+
+ switch (nvkm_memory_target(fw->inst)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_HOST: target = 2; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));
+
+ if (nvkm_msec(falcon->owner->device, 10,
+ if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+ nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+ if (nvkm_msec(falcon->owner->device, 10,
+ if (falcon->func->bind_stat(falcon, false) == 0)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+ } else {
+ nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
+ nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
+ }
+
+ if (fw->boot) {
+ switch (nvkm_memory_target(&fw->fw.mem.memory)) {
+ case NVKM_MEM_TARGET_VRAM: target = 4; break;
+ case NVKM_MEM_TARGET_HOST: target = 5; break;
+ case NVKM_MEM_TARGET_NCOH: target = 6; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
+ IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
+ fw->boot_addr >> 8, false);
+ if (ret)
+ return ret;
+
+ return fw->func->load_bld(fw);
+ }
+
+ ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
+ IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
+ if (ret)
+ return ret;
+
+ ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
+ IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
+ if (ret)
+ return ret;
+
+ ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
+ DMEM, fw->dmem_base, fw->dmem_size, 0, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
+{
+ return nvkm_falcon_reset(fw->falcon);
+}
+
+int
+gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ u32 addr = falcon->func->debug;
+ int ret = 0;
+
+ if (addr) {
+ ret = nvkm_falcon_enable(falcon);
+ if (ret)
+ return ret;
+
+ if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
+ *sig_base_src = fw->sig_base_dbg;
+ return 1;
+ }
+ }
+
+ return ret;
+}
+
+const struct nvkm_falcon_fw_func
+gm200_flcn_fw = {
+ .signature = gm200_flcn_fw_signature,
+ .reset = gm200_flcn_fw_reset,
+ .load = gm200_flcn_fw_load,
+ .boot = gm200_flcn_fw_boot,
+};
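
[Editor's note] For orientation, a sketch of how an engine might wire these callbacks into its nvkm_falcon_func table. Every field name below is used by code in this commit, but the particular combination and the .debug offset are illustrative; real tables live with each engine's code:

	/* Sketch only; not the table of any specific engine. */
	static const struct nvkm_falcon_func
	example_flcn = {
		.disable = gm200_flcn_disable,
		.enable = gm200_flcn_enable,
		.reset_pmc = true,
		.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
		.debug = 0xc08,		/* illustrative offset */
		.bind_inst = gm200_flcn_bind_inst,
		.bind_stat = gm200_flcn_bind_stat,
		.imem_pio = &gm200_flcn_imem_pio,
		.dmem_pio = &gm200_flcn_dmem_pio,
	};
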
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c
new file mode 100644
index 0000000000..c774935f30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+gp102_flcn_pio_emem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
+{
+ while (len >= 4) {
+ *(u32 *)img = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+ img += 4;
+ len -= 4;
+ }
+}
+
+static void
+gp102_flcn_pio_emem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
+{
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(25) | dmem_base);
+}
+
+static void
+gp102_flcn_pio_emem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
+{
+ while (len >= 4) {
+ nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), *(u32 *)img);
+ img += 4;
+ len -= 4;
+ }
+}
+
+static void
+gp102_flcn_pio_emem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 emem_base)
+{
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(24) | emem_base);
+}
+
+const struct nvkm_falcon_func_pio
+gp102_flcn_emem_pio = {
+ .min = 4,
+ .max = 0x100,
+ .wr_init = gp102_flcn_pio_emem_wr_init,
+ .wr = gp102_flcn_pio_emem_wr,
+ .rd_init = gp102_flcn_pio_emem_rd_init,
+ .rd = gp102_flcn_pio_emem_rd,
+};
+
+int
+gp102_flcn_reset_eng(struct nvkm_falcon *falcon)
+{
+ int ret;
+
+ if (falcon->func->reset_prep) {
+ ret = falcon->func->reset_prep(falcon);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
+ udelay(10);
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
+
+ return falcon->func->reset_wait_mem_scrubbing(falcon);
+}
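
[Editor's note] These 0xac0/0xac4 ports reach EMEM, which firmware otherwise sees as DMEM addresses at and above falcon->func->emem_addr; nvkm_falcon_pio() in base.c rebases such DMEM accesses onto this EMEM path.
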
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 0000000000..16b246fda6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+ spin_lock(&msgq->lock);
+ msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+ spin_unlock(&msgq->lock);
+}
+
+bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+ u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+ u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+ return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+ /* has the buffer looped? */
+ if (head < msgq->position)
+ msgq->position = msgq->offset;
+
+ tail = msgq->position;
+
+ available = head - tail;
+ if (size > available) {
+ FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+ size, available);
+ return -EINVAL;
+ }
+
+ nvkm_falcon_pio_rd(falcon, 0, DMEM, tail, data, 0, size);
+ msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+ return 0;
+}
+
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
+{
+ int ret = 0;
+
+ nvkm_falcon_msgq_open(msgq);
+
+ if (nvkm_falcon_msgq_empty(msgq))
+ goto close;
+
+ ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message header");
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+ ret = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message data");
+ goto close;
+ }
+ }
+
+ ret = 1;
+close:
+ nvkm_falcon_msgq_close(msgq, (ret >= 0));
+ return ret;
+}
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+ FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->result = seq->callback(seq->priv, hdr);
+ }
+
+ if (seq->async) {
+ nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+ return 0;
+ }
+
+ complete_all(&seq->done);
+ return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
+
+ while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+ nvkm_falcon_msgq_exec(msgq, hdr);
+}
+
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ struct nvfw_falcon_msg *hdr = data;
+ int ret;
+
+ msgq->head_reg = falcon->func->msgq.head;
+ msgq->tail_reg = falcon->func->msgq.tail;
+ msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+ nvkm_falcon_msgq_open(msgq);
+ ret = nvkm_falcon_msgq_pop(msgq, data, size);
+ if (ret == 0 && hdr->size != size) {
+ FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+ hdr->size, size);
+ ret = -EINVAL;
+ }
+ nvkm_falcon_msgq_close(msgq, ret == 0);
+ return ret;
+}
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+ msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+ msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+ msgq->offset = offset;
+
+ FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+ if (msgq) {
+ kfree(*pmsgq);
+ *pmsgq = NULL;
+ }
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+
+ if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ msgq->qmgr = qmgr;
+ msgq->name = name;
+ spin_lock_init(&msgq->lock);
+ return 0;
+}
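
[Editor's note] A hedged receive-path sketch; the init-message layout is an assumption for illustration, since each falcon firmware defines its own:

	/* Illustration only, assuming this commit's "qmgr.h". */
	struct example_init_msg {
		struct nvfw_falcon_msg hdr;
		u32 queue_offset;	/* hypothetical field */
	};

	static void
	example_recv(struct nvkm_falcon_msgq *msgq)
	{
		struct example_init_msg init;

		/* One-shot: locate the message queue from the init message... */
		if (nvkm_falcon_msgq_recv_initmsg(msgq, &init, sizeof(init)) == 0)
			nvkm_falcon_msgq_init(msgq, 0, init.queue_offset, 0x100);

		/* ...then drain replies (normally from an IRQ-driven worker). */
		nvkm_falcon_msgq_recv(msgq);
	}
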
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
new file mode 100644
index 0000000000..11a24b9c85
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_PRIV_H__
+#define __NVKM_FALCON_PRIV_H__
+#include <core/falcon.h>
+
+static inline int
+nvkm_falcon_enable(struct nvkm_falcon *falcon)
+{
+ if (falcon->func->enable)
+ return falcon->func->enable(falcon);
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 0000000000..a453de341a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
+ u32 index;
+
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&qmgr->seq.mutex);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
+
+ seq = &qmgr->seq.id[index];
+ seq->state = SEQ_STATE_PENDING;
+ return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
+{
+ /* no need to acquire seq.mutex since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ reinit_completion(&seq->done);
+ clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+ if (qmgr) {
+ kfree(*pqmgr);
+ *pqmgr = NULL;
+ }
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+ struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr;
+ int i;
+
+ if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ qmgr->falcon = falcon;
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
+ }
+
+ return 0;
+}
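
[Editor's note] Construction order for the three objects above, as a sketch; the queue names are arbitrary:

	/* Illustration only: one qmgr per falcon, queues hang off it. */
	static int
	example_qmgr_setup(struct nvkm_falcon *falcon,
			   struct nvkm_falcon_qmgr **qmgr,
			   struct nvkm_falcon_cmdq **cmdq,
			   struct nvkm_falcon_msgq **msgq)
	{
		int ret = nvkm_falcon_qmgr_new(falcon, qmgr);

		if (ret)
			return ret;

		ret = nvkm_falcon_cmdq_new(*qmgr, "cmdq", cmdq);
		if (ret == 0)
			ret = nvkm_falcon_msgq_new(*qmgr, "msgq", msgq);
		return ret;
	}
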
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 0000000000..79f0da9e74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nvfw_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @async: true if the sender does not wait for the reply
+ * @callback: callback to call upon receiving matching message
+ * @priv: private data passed to @callback
+ * @done: completion to signal once @callback has run
+ * @result: return value from @callback
+ */
+struct nvkm_falcon_qmgr_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ bool async;
+ nvkm_falcon_qmgr_callback callback;
+ void *priv;
+ struct completion done;
+ int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that many simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+ struct nvkm_falcon *falcon;
+
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+ struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+ struct completion ready;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+ u32 size;
+
+ u32 position;
+};
+
+struct nvkm_falcon_msgq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ spinlock_t lock;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+
+ u32 position;
+};
+
+#define FLCNQ_PRINTK(q,l,p,f,a...) FLCN_PRINTK((q)->qmgr->falcon, l, p, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK((q), DEBUG, info, f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK((q), ERROR, err, f, ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
new file mode 100644
index 0000000000..dd2ddc54ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+void
+nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ u8 rem = size % 4;
+ u32 reg;
+ int i;
+
+ size -= rem;
+
+ reg = start | BIT(24) | (secure ? BIT(28) : 0);
+ nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
+ for (i = 0; i < size / 4; i++) {
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
+ }
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
+ extra & (BIT(rem * 8) - 1));
+ ++i;
+ }
+
+ /* code must be padded to 0x40 words */
+ for (; i & 0x3f; i++)
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
+}
+
+void
+nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
+ for (i = 0; i < size / 4; i++)
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
+ extra & (BIT(rem * 8) - 1));
+ }
+}
+
+void
+nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
+{
+ u32 reg = nvkm_falcon_rd32(falcon, 0x100);
+
+ if (reg & BIT(6))
+ nvkm_falcon_wr32(falcon, 0x130, 0x2);
+ else
+ nvkm_falcon_wr32(falcon, 0x100, 0x2);
+}
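
[Editor's note] On the tagging arithmetic in nvkm_falcon_v1_load_imem(): the (i & 0x3f) == 0 test fires every 64 words, i.e. every 256 bytes, so each 256-byte IMEM page receives its own incrementing tag, and the trailing loop zero-pads the final page to a full 0x40 words. A 1 KiB image therefore consumes four consecutive tags.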