author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/nouveau/nvkm/falcon
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/falcon')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild    6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/base.c  223
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c  214
-rw-r--r--  drivers/gpu/dr m/nouveau/nvkm/falcon/msgq.c  213
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/priv.h    5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c   87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h   89
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/v1.c    311
8 files changed, 1148 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
new file mode 100644
index 000000000..d79d78390
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
+nvkm-y += nvkm/falcon/v1.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
new file mode 100644
index 000000000..f3f90c106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mc.h>
+#include <subdev/top.h>
+
+void
+nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ if (secure && !falcon->secret) {
+ nvkm_warn(falcon->user,
+ "writing with secure tag on a non-secure falcon!\n");
+ return;
+ }
+
+ falcon->func->load_imem(falcon, data, start, size, tag, port,
+ secure);
+}
+
+void
+nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ mutex_lock(&falcon->dmem_mutex);
+
+ falcon->func->load_dmem(falcon, data, start, size, port);
+
+ mutex_unlock(&falcon->dmem_mutex);
+}
+
+void
+nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
+ void *data)
+{
+ mutex_lock(&falcon->dmem_mutex);
+
+ falcon->func->read_dmem(falcon, start, size, port, data);
+
+ mutex_unlock(&falcon->dmem_mutex);
+}
+
+void
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
+{
+ if (!falcon->func->bind_context) {
+ nvkm_error(falcon->user,
+ "Context binding not supported on this falcon!\n");
+ return;
+ }
+
+ falcon->func->bind_context(falcon, inst);
+}
+
+void
+nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
+{
+ falcon->func->set_start_addr(falcon, start_addr);
+}
+
+void
+nvkm_falcon_start(struct nvkm_falcon *falcon)
+{
+ falcon->func->start(falcon);
+}
+
+int
+nvkm_falcon_enable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
+ ret = falcon->func->enable(falcon);
+ if (ret) {
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nvkm_falcon_disable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+
+ /* already disabled; return, or wait_idle will time out */
+ if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
+ return;
+
+ falcon->func->disable(falcon);
+
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
+}
+
+int
+nvkm_falcon_reset(struct nvkm_falcon *falcon)
+{
+ if (!falcon->func->reset) {
+ nvkm_falcon_disable(falcon);
+ return nvkm_falcon_enable(falcon);
+ }
+
+ return falcon->func->reset(falcon);
+}
+
+int
+nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
+{
+ return falcon->func->wait_for_halt(falcon, ms);
+}
+
+int
+nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
+{
+ return falcon->func->clear_interrupt(falcon, mask);
+}
+
+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 reg;
+
+ if (!falcon->addr) {
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
+ if (WARN_ON(!falcon->addr))
+ return -ENODEV;
+ }
+
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ if (func->debug) {
+ u32 val = nvkm_falcon_rd32(falcon, func->debug);
+ falcon->debug = (val >> 20) & 0x1;
+ }
+
+ return 0;
+}
+
+void
+nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+{
+ if (unlikely(!falcon))
+ return;
+
+ mutex_lock(&falcon->mutex);
+ if (falcon->user == user) {
+ nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
+ falcon->user = NULL;
+ }
+ mutex_unlock(&falcon->mutex);
+}
+
+int
+nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+{
+ int ret = 0;
+
+ mutex_lock(&falcon->mutex);
+ if (falcon->user) {
+ nvkm_error(user, "%s falcon already acquired by %s!\n",
+ falcon->name, falcon->user->name);
+ mutex_unlock(&falcon->mutex);
+ return -EBUSY;
+ }
+
+ nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ if (!falcon->oneinit)
+ ret = nvkm_falcon_oneinit(falcon);
+ falcon->user = user;
+ mutex_unlock(&falcon->mutex);
+ return ret;
+}
+
+void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
+nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
+ struct nvkm_subdev *subdev, const char *name, u32 addr,
+ struct nvkm_falcon *falcon)
+{
+ falcon->func = func;
+ falcon->owner = subdev;
+ falcon->name = name;
+ falcon->addr = addr;
+ mutex_init(&falcon->mutex);
+ mutex_init(&falcon->dmem_mutex);
+ return 0;
+}
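
The routines above form a falcon's public lifecycle: a client subdev takes
exclusive ownership, resets the unit, loads its code, and starts execution.
A minimal sketch of that flow follows (the helper and its fw/size/start
parameters are hypothetical, not part of this patch; error handling is
abbreviated):

static int
example_falcon_boot(struct nvkm_falcon *falcon, struct nvkm_subdev *user,
		    void *fw, u32 size, u32 start)
{
	int ret;

	ret = nvkm_falcon_get(falcon, user);	/* take exclusive ownership */
	if (ret)
		return ret;

	ret = nvkm_falcon_reset(falcon);	/* disable, then (re)enable */
	if (ret)
		goto put;

	/* non-secure code at IMEM offset 0, tag 0, through port 0 */
	nvkm_falcon_load_imem(falcon, fw, 0, size, 0, 0, false);
	nvkm_falcon_set_start_addr(falcon, start);
	nvkm_falcon_start(falcon);
	return 0;	/* ownership is kept while the falcon runs */

put:
	nvkm_falcon_put(falcon, user);
	return ret;
}
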
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 000000000..44cf6a886
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
+ u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+ u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+ u32 free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ if (head >= tail) {
+ free = cmdq->offset + cmdq->size - head;
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = cmdq->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
+ cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+ struct nvfw_falcon_cmd cmd;
+
+ cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+ cmd.size = sizeof(cmd);
+ nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+ cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ bool rewind = false;
+
+ mutex_lock(&cmdq->mutex);
+
+ if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+ FLCNQ_DBG(cmdq, "queue full");
+ mutex_unlock(&cmdq->mutex);
+ return -EAGAIN;
+ }
+
+ cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+ if (rewind)
+ nvkm_falcon_cmdq_rewind(cmdq);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+ nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+ mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
+{
+ static unsigned timeout = 2000;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+ if (ret) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+ return ret;
+ }
+
+ nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+ nvkm_falcon_cmdq_close(cmdq);
+ return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
+ nvkm_falcon_qmgr_callback cb, void *priv,
+ unsigned long timeout)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+ int ret;
+
+ if (!wait_for_completion_timeout(&cmdq->ready,
+ msecs_to_jiffies(1000))) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+ return -ETIMEDOUT;
+ }
+
+ seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->state = SEQ_STATE_USED;
+ seq->async = !timeout;
+ seq->callback = cb;
+ seq->priv = priv;
+
+ ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ return ret;
+ }
+
+ if (!seq->async) {
+ if (!wait_for_completion_timeout(&seq->done, timeout)) {
+ FLCNQ_ERR(cmdq, "timeout waiting for reply");
+ return -ETIMEDOUT;
+ }
+ ret = seq->result;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ }
+
+ return ret;
+}
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+ reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+ cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+ cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+ cmdq->offset = offset;
+ cmdq->size = size;
+ complete_all(&cmdq->ready);
+
+ FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+ if (cmdq) {
+ kfree(*pcmdq);
+ *pcmdq = NULL;
+ }
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq;
+
+ if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cmdq->qmgr = qmgr;
+ cmdq->name = name;
+ mutex_init(&cmdq->mutex);
+ init_completion(&cmdq->ready);
+ return 0;
+}
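
Two details of cmdq.c are worth calling out: nvkm_falcon_cmdq_has_room()
reserves HDR_SIZE at the top of the ring so a REWIND header can always be
written without wrapping mid-entry, and the timeout passed to
nvkm_falcon_cmdq_send() is in jiffies (it is handed straight to
wait_for_completion_timeout(); a zero timeout makes the command
asynchronous). A hedged usage sketch, with a hypothetical reply callback:

/* hypothetical reply handler; its return value becomes seq->result */
static int
example_reply_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
	/* decode the unit-specific payload that follows *hdr here */
	return 0;
}

static int
example_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
{
	/* cmd->unit_id and cmd->size are assumed filled in by the caller;
	 * blocks up to one second for the matching reply message */
	return nvkm_falcon_cmdq_send(cmdq, cmd, example_reply_cb, NULL,
				     msecs_to_jiffies(1000));
}
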
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 000000000..e74371dff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+ mutex_lock(&msgq->mutex);
+ msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+ mutex_unlock(&msgq->mutex);
+}
+
+static bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+ u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+ u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+ return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+ /* has the buffer looped? */
+ if (head < msgq->position)
+ msgq->position = msgq->offset;
+
+ tail = msgq->position;
+
+ available = head - tail;
+ if (size > available) {
+ FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+ size, available);
+ return -EINVAL;
+ }
+
+ nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
+ msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+ return 0;
+}
+
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
+{
+ int ret = 0;
+
+ nvkm_falcon_msgq_open(msgq);
+
+ if (nvkm_falcon_msgq_empty(msgq))
+ goto close;
+
+ ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message header");
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+ ret = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message data");
+ goto close;
+ }
+ }
+
+ ret = 1;
+close:
+ nvkm_falcon_msgq_close(msgq, (ret >= 0));
+ return ret;
+}
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+ FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->result = seq->callback(seq->priv, hdr);
+ }
+
+ if (seq->async) {
+ nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+ return 0;
+ }
+
+ complete_all(&seq->done);
+ return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
+
+ while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+ nvkm_falcon_msgq_exec(msgq, hdr);
+}
+
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ struct nvfw_falcon_msg *hdr = data;
+ int ret;
+
+ msgq->head_reg = falcon->func->msgq.head;
+ msgq->tail_reg = falcon->func->msgq.tail;
+ msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+ nvkm_falcon_msgq_open(msgq);
+ ret = nvkm_falcon_msgq_pop(msgq, data, size);
+ if (ret == 0 && hdr->size != size) {
+ FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+ hdr->size, size);
+ ret = -EINVAL;
+ }
+ nvkm_falcon_msgq_close(msgq, ret == 0);
+ return ret;
+}
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+ msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+ msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+ msgq->offset = offset;
+
+ FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+ if (msgq) {
+ kfree(*pmsgq);
+ *pmsgq = NULL;
+ }
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq;
+
+ if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ msgq->qmgr = qmgr;
+ msgq->name = name;
+ mutex_init(&msgq->mutex);
+ return 0;
+}
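
cmdq.c and msgq.c are driven in a fixed order: create the qmgr and both
queues, pop the firmware's first (init) message to learn the queue geometry,
then program each queue. A sketch of that order follows; the init-message
layout is hypothetical, since the real format is firmware-specific, and
cleanup on failure is elided:

static int
example_queue_bringup(struct nvkm_falcon *falcon,
		      struct nvkm_falcon_qmgr **pqmgr,
		      struct nvkm_falcon_cmdq **pcmdq,
		      struct nvkm_falcon_msgq **pmsgq)
{
	struct {
		struct nvfw_falcon_msg hdr;
		/* hypothetical fields; real firmware defines its own layout */
		u32 cmdq_offset, cmdq_size;
		u32 msgq_offset, msgq_size;
	} init;
	int ret;

	ret = nvkm_falcon_qmgr_new(falcon, pqmgr);
	if (ret)
		return ret;
	ret = nvkm_falcon_cmdq_new(*pqmgr, "cmdq", pcmdq);
	if (ret)
		return ret;
	ret = nvkm_falcon_msgq_new(*pqmgr, "msgq", pmsgq);
	if (ret)
		return ret;

	/* the firmware's first message must be exactly sizeof(init) bytes */
	ret = nvkm_falcon_msgq_recv_initmsg(*pmsgq, &init, sizeof(init));
	if (ret)
		return ret;

	nvkm_falcon_cmdq_init(*pcmdq, 0, init.cmdq_offset, init.cmdq_size);
	nvkm_falcon_msgq_init(*pmsgq, 0, init.msgq_offset, init.msgq_size);
	return 0;
}
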
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
new file mode 100644
index 000000000..466188752
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_PRIV_H__
+#define __NVKM_FALCON_PRIV_H__
+#include <core/falcon.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 000000000..a453de341
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
+ u32 index;
+
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&qmgr->seq.mutex);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
+
+ seq = &qmgr->seq.id[index];
+ seq->state = SEQ_STATE_PENDING;
+ return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
+{
+ /* no need to acquire seq.mutex since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ reinit_completion(&seq->done);
+ clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+ if (qmgr) {
+ kfree(*pqmgr);
+ *pqmgr = NULL;
+ }
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+ struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr;
+ int i;
+
+ if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ qmgr->falcon = falcon;
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
+ }
+
+ return 0;
+}
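
The sequence pool is what pairs a command with its eventual reply; cmdq.c is
the only writer, but the pattern is simple enough to show in isolation (a
fragment sketch; the surrounding submit/reply steps are elided):

	struct nvkm_falcon_qmgr_seq *seq;

	seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	seq->state = SEQ_STATE_USED;	/* in-flight, as cmdq_send marks it */
	/* ...stamp seq->id into the command and submit it... */

	/* once the reply for seq->id has been handled: */
	nvkm_falcon_qmgr_seq_release(qmgr, seq);	/* back to the free pool */
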
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 000000000..976cb7b7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nvfw_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @callback: callback to call upon receiving matching message
+ * @done: completion to signal after the callback is called
+ */
+struct nvkm_falcon_qmgr_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ bool async;
+ nvkm_falcon_qmgr_callback callback;
+ void *priv;
+ struct completion done;
+ int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that many simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+ struct nvkm_falcon *falcon;
+
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+ struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+ struct completion ready;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+ u32 size;
+
+ u32 position;
+};
+
+struct nvkm_falcon_msgq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+
+ u32 position;
+};
+
+#define FLCNQ_PRINTK(t,q,f,a...) \
+ FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a)
+#endif
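
One consequence of the macros above: every queue entry is padded to
QUEUE_ALIGNMENT, and MSG_BUF_SIZE caps hdr->size in msgq.c. Illustrative
arithmetic (values are examples only):

	u32 body = 13;					/* hdr + payload bytes */
	u32 footprint = ALIGN(body, QUEUE_ALIGNMENT);	/* == 16 in the ring */
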
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
new file mode 100644
index 000000000..b0ee4c314
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+void
+nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ u8 rem = size % 4;
+ u32 reg;
+ int i;
+
+ size -= rem;
+
+ reg = start | BIT(24) | (secure ? BIT(28) : 0);
+ nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
+ for (i = 0; i < size / 4; i++) {
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
+ }
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
+ extra & (BIT(rem * 8) - 1));
+ ++i;
+ }
+
+ /* code must be padded to 0x40 words */
+ for (; i & 0x3f; i++)
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
+}
+
+static void
+nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
+ for (i = 0; i < size / 4; i++)
+ nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
+ extra & (BIT(rem * 8) - 1));
+ }
+}
+
+void
+nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ u8 rem = size % 4;
+ int i;
+
+ if (func->emem_addr && start >= func->emem_addr)
+ return nvkm_falcon_v1_load_emem(falcon, data,
+ start - func->emem_addr, size,
+ port);
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
+ for (i = 0; i < size / 4; i++)
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
+ extra & (BIT(rem * 8) - 1));
+ }
+}
+
+static void
+nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
+ u8 port, void *data)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
+ for (i = 0; i < size / 4; i++)
+ ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get read
+ */
+ if (rem) {
+ u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+ for (i = size; i < size + rem; i++) {
+ ((u8 *)data)[i] = (u8)(extra & 0xff);
+ extra >>= 8;
+ }
+ }
+}
+
+void
+nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
+ u8 port, void *data)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ u8 rem = size % 4;
+ int i;
+
+ if (func->emem_addr && start >= func->emem_addr)
+ return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
+ size, port, data);
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
+ for (i = 0; i < size / 4; i++)
+ ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get read
+ */
+ if (rem) {
+ u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ for (i = size; i < size + rem; i++) {
+ ((u8 *)data)[i] = (u8)(extra & 0xff);
+ extra >>= 8;
+ }
+ }
+}
+
+void
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
+{
+ const u32 fbif = falcon->func->fbif;
+ u32 inst_loc;
+
+ /* disable instance block binding */
+ if (ctx == NULL) {
+ nvkm_falcon_wr32(falcon, 0x10c, 0x0);
+ return;
+ }
+
+ nvkm_falcon_wr32(falcon, 0x10c, 0x1);
+
+ /* setup apertures - virtual */
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
+ /* setup apertures - physical */
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
+
+ /* Set context */
+ switch (nvkm_memory_target(ctx)) {
+ case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
+ case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
+ case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* Enable context */
+ nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
+ nvkm_falcon_wr32(falcon, 0x054,
+ ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
+ (inst_loc << 28) | (1 << 30));
+
+ nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
+ nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
+}
+
+void
+nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
+{
+ nvkm_falcon_wr32(falcon, 0x104, start_addr);
+}
+
+void
+nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
+{
+ u32 reg = nvkm_falcon_rd32(falcon, 0x100);
+
+ if (reg & BIT(6))
+ nvkm_falcon_wr32(falcon, 0x130, 0x2);
+ else
+ nvkm_falcon_wr32(falcon, 0x100, 0x2);
+}
+
+int
+nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ /* clear interrupt(s) */
+ nvkm_falcon_mask(falcon, 0x004, mask, mask);
+ /* wait until interrupts are cleared */
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+falcon_v1_wait_idle(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
+ if (ret < 0) {
+ nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
+ return ret;
+ }
+
+ ret = falcon_v1_wait_idle(falcon);
+ if (ret)
+ return ret;
+
+ /* enable IRQs */
+ nvkm_falcon_wr32(falcon, 0x010, 0xff);
+
+ return 0;
+}
+
+void
+nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
+{
+ /* disable IRQs and wait for any previous code to complete */
+ nvkm_falcon_wr32(falcon, 0x014, 0xff);
+ falcon_v1_wait_idle(falcon);
+}
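
The tail masking repeated through v1.c is easiest to see with numbers. An
illustrative case:

	/* size = 10 -> rem = 2: BIT(rem * 8) - 1 == 0x0000ffff, so only the
	 * two valid trailing bytes of the final 32-bit word are written;
	 * the read paths likewise peel rem bytes off one extra word read. */
	u32 size = 10;
	u8 rem = size % 4;		/* 2 */
	u32 mask = BIT(rem * 8) - 1;	/* 0x0000ffff */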