Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/fifo')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 357
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 393
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | 263
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c | 276
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | 226
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c | 97
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c | 254
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c | 132
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c | 311
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 699
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 1249
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 168
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | 308
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 361
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c | 241
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c | 308
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 399
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c | 130
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h | 133
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c | 477
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c | 45
49 files changed, 8473 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
new file mode 100644
index 000000000..5e831d347
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/fifo/base.o
+nvkm-y += nvkm/engine/fifo/nv04.o
+nvkm-y += nvkm/engine/fifo/nv10.o
+nvkm-y += nvkm/engine/fifo/nv17.o
+nvkm-y += nvkm/engine/fifo/nv40.o
+nvkm-y += nvkm/engine/fifo/nv50.o
+nvkm-y += nvkm/engine/fifo/g84.o
+nvkm-y += nvkm/engine/fifo/gf100.o
+nvkm-y += nvkm/engine/fifo/gk104.o
+nvkm-y += nvkm/engine/fifo/gk110.o
+nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
+nvkm-y += nvkm/engine/fifo/gm107.o
+nvkm-y += nvkm/engine/fifo/gm200.o
+nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
+nvkm-y += nvkm/engine/fifo/gp10b.o
+nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
+
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/channv50.o
+nvkm-y += nvkm/engine/fifo/chang84.o
+
+nvkm-y += nvkm/engine/fifo/dmanv04.o
+nvkm-y += nvkm/engine/fifo/dmanv10.o
+nvkm-y += nvkm/engine/fifo/dmanv17.o
+nvkm-y += nvkm/engine/fifo/dmanv40.o
+
+nvkm-y += nvkm/engine/fifo/gpfifonv50.o
+nvkm-y += nvkm/engine/fifo/gpfifog84.o
+nvkm-y += nvkm/engine/fifo/gpfifogf100.o
+nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+nvkm-y += nvkm/engine/fifo/gpfifotu102.o
+
+nvkm-y += nvkm/engine/fifo/usergv100.o
+nvkm-y += nvkm/engine/fifo/usertu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
new file mode 100644
index 000000000..58b8df75f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/notify.h>
+#include <subdev/mc.h>
+
+#include <nvif/event.h>
+#include <nvif/cl0080.h>
+#include <nvif/unpack.h>
+
+void
+nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
+{
+ unsigned long flags;
+ if (WARN_ON(!fifo->func->recover_chan))
+ return;
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->func->recover_chan(fifo, chid);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+ return fifo->func->pause(fifo, flags);
+}
+
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+ return fifo->func->start(fifo, flags);
+}
+
+void
+nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
+{
+ return fifo->func->fault(fifo, info);
+}
+
+void
+nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
+ struct nvkm_fifo_chan **pchan)
+{
+ struct nvkm_fifo_chan *chan = *pchan;
+ if (likely(chan)) {
+ *pchan = NULL;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ }
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
+{
+ struct nvkm_fifo_chan *chan;
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->inst->addr == inst) {
+ list_del(&chan->head);
+ list_add(&chan->head, &fifo->chan);
+ return chan;
+ }
+ }
+ return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->lock, flags);
+ if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
+ *rflags = flags;
+ return chan;
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
+{
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->lock, flags);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->chid == chid) {
+ list_del(&chan->head);
+ list_add(&chan->head, &fifo->chan);
+ *rflags = flags;
+ return chan;
+ }
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return NULL;
+}
+
+void
+nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
+{
+ nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
+}
+
+static int
+nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (size == 0) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = chan->chid;
+ return 0;
+ }
+ return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nvkm_fifo_kevent_func = {
+ .ctor = nvkm_fifo_kevent_ctor,
+};
+
+static void
+nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ fifo->func->uevent_fini(fifo);
+}
+
+static void
+nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ fifo->func->uevent_init(fifo);
+}
+
+static int
+nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
+{
+ union {
+ struct nvif_notify_uevent_req none;
+ } *req = data;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+ notify->index = 0;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_event_func
+nvkm_fifo_uevent_func = {
+ .ctor = nvkm_fifo_uevent_ctor,
+ .init = nvkm_fifo_uevent_init,
+ .fini = nvkm_fifo_uevent_fini,
+};
+
+void
+nvkm_fifo_uevent(struct nvkm_fifo *fifo)
+{
+ struct nvif_notify_uevent_rep rep = {
+ };
+ nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
+}
+
+static int
+nvkm_fifo_class_new_(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ return fifo->func->class_new(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class_ = {
+ .ctor = nvkm_fifo_class_new_,
+};
+
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ return sclass->ctor(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+ .ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+ const struct nvkm_device_oclass **class)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ const struct nvkm_fifo_chan_oclass *sclass;
+ int c = 0;
+
+ if (fifo->func->class_get) {
+ int ret = fifo->func->class_get(fifo, index, oclass);
+ if (ret == 0)
+ *class = &nvkm_fifo_class_;
+ return ret;
+ }
+
+ while ((sclass = fifo->func->chan[c])) {
+ if (c++ == index) {
+ oclass->base = sclass->base;
+ oclass->engn = sclass;
+ *class = &nvkm_fifo_class;
+ return 0;
+ }
+ }
+
+ return c;
+}
+
+static void
+nvkm_fifo_intr(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ fifo->func->intr(fifo);
+}
+
+static int
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ if (fifo->func->fini)
+ fifo->func->fini(fifo);
+ return 0;
+}
+
+static int
+nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ switch (mthd) {
+ case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
+ default:
+ if (fifo->func->info)
+ return fifo->func->info(fifo, mthd, data);
+ break;
+ }
+ return -ENOSYS;
+}
+
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ if (fifo->func->oneinit)
+ return fifo->func->oneinit(fifo);
+ return 0;
+}
+
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
+}
+
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ fifo->func->init(fifo);
+ return 0;
+}
+
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ void *data = fifo;
+ if (fifo->func->dtor)
+ data = fifo->func->dtor(fifo);
+ nvkm_event_fini(&fifo->kevent);
+ nvkm_event_fini(&fifo->uevent);
+ mutex_destroy(&fifo->mutex);
+ return data;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+ .dtor = nvkm_fifo_dtor,
+ .preinit = nvkm_fifo_preinit,
+ .oneinit = nvkm_fifo_oneinit,
+ .info = nvkm_fifo_info,
+ .init = nvkm_fifo_init,
+ .fini = nvkm_fifo_fini,
+ .intr = nvkm_fifo_intr,
+ .base.sclass = nvkm_fifo_class_get,
+};
+
+int
+nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
+{
+ int ret;
+
+ fifo->func = func;
+ INIT_LIST_HEAD(&fifo->chan);
+ spin_lock_init(&fifo->lock);
+ mutex_init(&fifo->mutex);
+
+ if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
+ fifo->nr = NVKM_FIFO_CHID_NR;
+ else
+ fifo->nr = nr;
+ bitmap_clear(fifo->mask, 0, fifo->nr);
+
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
+ if (ret)
+ return ret;
+
+ if (func->uevent_init) {
+ ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
+ &fifo->uevent);
+ if (ret)
+ return ret;
+ }
+
+ return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
+}
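Aside, not part of the patch: nvkm_fifo_chan_inst() and nvkm_fifo_chan_chid() above take fifo->lock, walk the channel list, move a hit to the head as a cheap most-recently-used cache, and return with the lock still held so the caller releases it through nvkm_fifo_chan_put(). A rough userspace sketch of that pattern follows; the pthread mutex and the chan/fifo types here are stand-ins rather than driver API.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct chan {
	uint64_t inst;                  /* instance address, the lookup key */
	struct chan *prev, *next;
};

struct fifo {
	pthread_mutex_t lock;
	struct chan *head;              /* most-recently-found channel first */
};

/* caller holds fifo->lock; a hit is moved to the head of the list */
static struct chan *
chan_inst_locked(struct fifo *fifo, uint64_t inst)
{
	struct chan *c;

	for (c = fifo->head; c; c = c->next) {
		if (c->inst != inst)
			continue;
		if (c != fifo->head) {
			c->prev->next = c->next;        /* unlink... */
			if (c->next)
				c->next->prev = c->prev;
			c->prev = NULL;                 /* ...reinsert at head */
			c->next = fifo->head;
			fifo->head->prev = c;
			fifo->head = c;
		}
		return c;
	}
	return NULL;
}

/* returns with the lock still held on success, like nvkm_fifo_chan_inst() */
static struct chan *
chan_inst(struct fifo *fifo, uint64_t inst)
{
	struct chan *c;

	pthread_mutex_lock(&fifo->lock);
	c = chan_inst_locked(fifo, inst);
	if (c)
		return c;
	pthread_mutex_unlock(&fifo->lock);
	return NULL;
}

/* counterpart of nvkm_fifo_chan_put(): clear the pointer, drop the lock */
static void
chan_put(struct fifo *fifo, struct chan **pchan)
{
	if (*pchan) {
		*pchan = NULL;
		pthread_mutex_unlock(&fifo->lock);
	}
}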
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
new file mode 100644
index 000000000..d0ac60b06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_FIFO_CGRP_H__
+#define __NVKM_FIFO_CGRP_H__
+#include "priv.h"
+
+struct nvkm_fifo_cgrp {
+ int id;
+ struct list_head head;
+ struct list_head chan;
+ int chan_nr;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 000000000..2e7f32ceb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+struct nvkm_fifo_chan_object {
+ struct nvkm_oproxy oproxy;
+ struct nvkm_fifo_chan *chan;
+ int hash;
+};
+
+static struct nvkm_fifo_engn *
+nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->fifo->func->engine_id(chan->fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.object->engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
+ int ret = 0;
+
+ if (--engn->usecount)
+ return 0;
+
+ if (chan->func->engine_fini) {
+ ret = chan->func->engine_fini(chan, engine, suspend);
+ if (ret) {
+ nvif_error(&chan->object,
+ "detach %s failed, %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ if (engn->object) {
+ ret = nvkm_object_fini(engn->object, suspend);
+ if (ret && suspend)
+ return ret;
+ }
+
+ nvif_trace(&chan->object, "detached %s\n", name);
+ return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.object->engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
+ int ret;
+
+ if (engn->usecount++)
+ return 0;
+
+ if (engn->object) {
+ ret = nvkm_object_init(engn->object);
+ if (ret)
+ return ret;
+ }
+
+ if (chan->func->engine_init) {
+ ret = chan->func->engine_init(chan, engine);
+ if (ret) {
+ nvif_error(&chan->object,
+ "attach %s failed, %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ nvif_trace(&chan->object, "attached %s\n", name);
+ return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.base.engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+
+ if (chan->func->object_dtor)
+ chan->func->object_dtor(chan, object->hash);
+
+ if (!--engn->refcount) {
+ if (chan->func->engine_dtor)
+ chan->func->engine_dtor(chan, engine);
+ nvkm_object_del(&engn->object);
+ if (chan->vmm)
+ atomic_dec(&chan->vmm->engref[engine->subdev.type]);
+ }
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+ .dtor[0] = nvkm_fifo_chan_child_del,
+ .init[0] = nvkm_fifo_chan_child_init,
+ .fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_engine *engine = oclass->engine;
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ struct nvkm_fifo_chan_object *object;
+ int ret = 0;
+
+ if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+ object->chan = chan;
+ *pobject = &object->oproxy.base;
+
+ if (!engn->refcount++) {
+ struct nvkm_oclass cclass = {
+ .client = oclass->client,
+ .engine = oclass->engine,
+ };
+
+ if (chan->vmm)
+ atomic_inc(&chan->vmm->engref[engine->subdev.type]);
+
+ if (engine->func->fifo.cclass) {
+ ret = engine->func->fifo.cclass(chan, &cclass,
+ &engn->object);
+ } else
+ if (engine->func->cclass) {
+ ret = nvkm_object_new_(engine->func->cclass, &cclass,
+ NULL, 0, &engn->object);
+ }
+ if (ret)
+ return ret;
+
+ if (chan->func->engine_ctor) {
+ ret = chan->func->engine_ctor(chan, oclass->engine,
+ engn->object);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+ .base = oclass->base,
+ .engn = oclass->engn,
+ .handle = oclass->handle,
+ .object = oclass->object,
+ .client = oclass->client,
+ .parent = engn->object ?
+ engn->object :
+ oclass->parent,
+ .engine = engine,
+ }, data, size, &object->oproxy.object);
+ if (ret)
+ return ret;
+
+ if (chan->func->object_ctor) {
+ object->hash =
+ chan->func->object_ctor(chan, object->oproxy.object);
+ if (object->hash < 0)
+ return object->hash;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ struct nvkm_fifo *fifo = chan->fifo;
+ struct nvkm_engine *engine;
+ u32 engm = chan->engm;
+ int engi, ret, c;
+
+ for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
+ if (!(engine = fifo->func->id_engine(fifo, engi)))
+ continue;
+ oclass->engine = engine;
+ oclass->base.oclass = 0;
+
+ if (engine->func->fifo.sclass) {
+ ret = engine->func->fifo.sclass(oclass, index);
+ if (oclass->base.oclass) {
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_fifo_chan_child_new;
+ return 0;
+ }
+
+ index -= ret;
+ continue;
+ }
+
+ while (engine->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = engine->func->sclass[index];
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_fifo_chan_child_new;
+ return 0;
+ }
+ }
+ index -= c;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+ struct nvkm_event **pevent)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (chan->func->ntfy)
+ return chan->func->ntfy(chan, type, pevent);
+ return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = chan->addr;
+ *size = chan->size;
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ chan->func->fini(chan);
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ chan->func->init(chan);
+ return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ struct nvkm_fifo *fifo = chan->fifo;
+ void *data = chan->func->dtor(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!list_empty(&chan->head)) {
+ __clear_bit(chan->chid, fifo->mask);
+ list_del(&chan->head);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst->memory);
+ nvkm_vmm_unref(&chan->vmm);
+ }
+
+ nvkm_gpuobj_del(&chan->push);
+ nvkm_gpuobj_del(&chan->inst);
+ return data;
+}
+
+static const struct nvkm_object_func
+nvkm_fifo_chan_func = {
+ .dtor = nvkm_fifo_chan_dtor,
+ .init = nvkm_fifo_chan_init,
+ .fini = nvkm_fifo_chan_fini,
+ .ntfy = nvkm_fifo_chan_ntfy,
+ .map = nvkm_fifo_chan_map,
+ .sclass = nvkm_fifo_chan_child_get,
+};
+
+int
+nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
+ struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
+ u64 hvmm, u64 push, u32 engm, int bar, u32 base,
+ u32 user, const struct nvkm_oclass *oclass,
+ struct nvkm_fifo_chan *chan)
+{
+ struct nvkm_client *client = oclass->client;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_dmaobj *dmaobj;
+ unsigned long flags;
+ int ret;
+
+ nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
+ chan->func = func;
+ chan->fifo = fifo;
+ chan->engm = engm;
+ INIT_LIST_HEAD(&chan->head);
+
+ /* instance memory */
+ ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
+ if (ret)
+ return ret;
+
+ /* allocate push buffer ctxdma instance */
+ if (push) {
+ dmaobj = nvkm_dmaobj_search(client, push);
+ if (IS_ERR(dmaobj))
+ return PTR_ERR(dmaobj);
+
+ ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+ &chan->push);
+ if (ret)
+ return ret;
+ }
+
+ /* channel address space */
+ if (hvmm) {
+ struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (vmm->mmu != device->mmu)
+ return -EINVAL;
+
+ ret = nvkm_vmm_join(vmm, chan->inst->memory);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
+ }
+
+ /* allocate channel id */
+ spin_lock_irqsave(&fifo->lock, flags);
+ chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
+ if (chan->chid >= NVKM_FIFO_CHID_NR) {
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return -ENOSPC;
+ }
+ list_add(&chan->head, &fifo->chan);
+ __set_bit(chan->chid, fifo->mask);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ /* determine address of this channel's user registers */
+ chan->addr = device->func->resource_addr(device, bar) +
+ base + user * chan->chid;
+ chan->size = user;
+ return 0;
+}
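Aside, not part of the patch: the constructor above hands out channel ids by claiming the first clear bit of fifo->mask under fifo->lock, then points chan->addr at the channel's slice of the user-register BAR (resource base + fixed offset + stride * chid). A minimal sketch of those two calculations, with made-up names and sizes and no locking shown:

#include <stdint.h>

#define CHID_NR 128                     /* made-up size for the example */

/* claim the first clear bit; the driver does this under fifo->lock */
static int
chid_alloc(uint64_t mask[CHID_NR / 64])
{
	int chid;

	for (chid = 0; chid < CHID_NR; chid++) {
		if (!(mask[chid / 64] & (1ULL << (chid % 64)))) {
			mask[chid / 64] |= 1ULL << (chid % 64);
			return chid;
		}
	}
	return -1;                      /* no free channel: the -ENOSPC case above */
}

/* user-register window: BAR base + fixed offset + per-channel stride */
static uint64_t
chan_user_addr(uint64_t bar_base, uint32_t base, uint32_t stride, int chid)
{
	return bar_base + base + (uint64_t)stride * chid;
}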
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 000000000..e53504354
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FIFO_CHAN_H__
+#define __NVKM_FIFO_CHAN_H__
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#include "priv.h"
+
+struct nvkm_fifo_chan_func {
+ void *(*dtor)(struct nvkm_fifo_chan *);
+ void (*init)(struct nvkm_fifo_chan *);
+ void (*fini)(struct nvkm_fifo_chan *);
+ int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
+ int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+ struct nvkm_object *);
+ void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+ int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+ int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+ bool suspend);
+ int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
+ void (*object_dtor)(struct nvkm_fifo_chan *, int);
+ u32 (*submit_token)(struct nvkm_fifo_chan *);
+};
+
+int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
+ u32 size, u32 align, bool zero, u64 vm, u64 push,
+ u32 engm, int bar, u32 base, u32 user,
+ const struct nvkm_oclass *, struct nvkm_fifo_chan *);
+
+struct nvkm_fifo_chan_oclass {
+ int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+ struct nvkm_sclass base;
+};
+
+int gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+#endif
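Aside, not part of the patch: the engine_ctor/engine_init/engine_fini/engine_dtor hooks declared here are driven by chan.c on first-use and last-use boundaries: the engine context is built when the first object for that engine is bound, activated when the first object is initialised, and torn down symmetrically. A toy model of that counting scheme, with printf standing in for the real hooks:

#include <stdio.h>

struct engn {
	int refcount;   /* objects bound to this engine on the channel */
	int usecount;   /* objects currently initialised */
};

static void
engn_bind(struct engn *engn)
{
	if (!engn->refcount++)
		printf("engine_ctor: build engine context\n");
}

static void
engn_activate(struct engn *engn)
{
	if (!engn->usecount++)
		printf("engine_init: attach channel to engine\n");
}

static void
engn_deactivate(struct engn *engn)
{
	if (--engn->usecount)
		return;
	printf("engine_fini: detach channel from engine\n");
}

static void
engn_unbind(struct engn *engn)
{
	if (!--engn->refcount)
		printf("engine_dtor: destroy engine context\n");
}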
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 000000000..3492c561f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/cl826e.h>
+
+static int
+g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+ struct nvkm_event **pevent)
+{
+ switch (type) {
+ case NV826E_V0_NTFY_NON_STALL_INTERRUPT:
+ *pevent = &chan->fifo->uevent;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : return -1;
+ case NVKM_ENGINE_GR : return 0x0020;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return 0x0040;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return 0x0060;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return 0x0080;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return 0x00a0;
+ case NVKM_ENGINE_CE : return 0x00c0;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+static int
+g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 engn, save;
+ int offset;
+ bool done;
+
+ offset = g84_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+
+ engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
+ save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+ nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+ done = nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) >= 0;
+ nvkm_wr32(device, 0x002520, save);
+ if (!done) {
+ nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ if (suspend)
+ return -EBUSY;
+ }
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+
+static int
+g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
+ u64 limit, start;
+ int offset;
+
+ offset = g84_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+ limit = engn->addr + engn->size - 1;
+ start = engn->addr;
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+ nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+static int
+g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+
+ if (g84_fifo_chan_engine_addr(engine) < 0)
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
+}
+
+static int
+g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ u32 handle = object->handle;
+ u32 context;
+
+ switch (object->engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context = 0x00000000; break;
+ case NVKM_ENGINE_GR : context = 0x00100000; break;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
+ case NVKM_ENGINE_ME :
+ case NVKM_ENGINE_CE : context = 0x00300000; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC :
+ case NVKM_ENGINE_VIC : context = 0x00500000; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+static void
+g84_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = chan->ramfc->addr >> 8;
+ u32 chid = chan->base.chid;
+
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+ nv50_fifo_runlist_update(fifo);
+}
+
+static const struct nvkm_fifo_chan_func
+g84_fifo_chan_func = {
+ .dtor = nv50_fifo_chan_dtor,
+ .init = g84_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .ntfy = g84_fifo_chan_ntfy,
+ .engine_ctor = g84_fifo_chan_engine_ctor,
+ .engine_dtor = nv50_fifo_chan_engine_dtor,
+ .engine_init = g84_fifo_chan_engine_init,
+ .engine_fini = g84_fifo_chan_engine_fini,
+ .object_ctor = g84_fifo_chan_object_ctor,
+ .object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
+ const struct nvkm_oclass *oclass,
+ struct nv50_fifo_chan *chan)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret;
+
+ if (!vmm)
+ return -EINVAL;
+
+ ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
+ 0x10000, 0x1000, false, vmm, push,
+ BIT(G84_FIFO_ENGN_SW) |
+ BIT(G84_FIFO_ENGN_GR) |
+ BIT(G84_FIFO_ENGN_MPEG) |
+ BIT(G84_FIFO_ENGN_MSPPP) |
+ BIT(G84_FIFO_ENGN_ME) |
+ BIT(G84_FIFO_ENGN_CE0) |
+ BIT(G84_FIFO_ENGN_VP) |
+ BIT(G84_FIFO_ENGN_MSPDEC) |
+ BIT(G84_FIFO_ENGN_CIPHER) |
+ BIT(G84_FIFO_ENGN_SEC) |
+ BIT(G84_FIFO_ENGN_VIC) |
+ BIT(G84_FIFO_ENGN_BSP) |
+ BIT(G84_FIFO_ENGN_MSVLD) |
+ BIT(G84_FIFO_ENGN_DMA),
+ 0, 0xc00000, 0x2000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
+ &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+ &chan->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
+ &chan->cache);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+}
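Aside, not part of the patch: g84_fifo_chan_engine_fini() kicks a channel off an engine by writing the instance address to 0x0032fc and letting nvkm_msec() poll until the register stops reading back 0xffffffff, giving up after 2ms. A userspace sketch of that bounded poll; the volatile pointer stands in for a mapped register and the timeout handling is simplified:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* spin on a register until it stops reading 'busy' or the timeout expires */
static bool
poll_until_not(const volatile uint32_t *reg, uint32_t busy, int timeout_ms)
{
	struct timespec start, now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (*reg != busy)
			return true;            /* engine acknowledged the kick */
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= timeout_ms)
			return false;           /* the "unload timeout" case above */
	}
}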
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 000000000..f7ac1061f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GF100_FIFO_CHAN_H__
+#define __GF100_FIFO_CHAN_H__
+#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
+#include "chan.h"
+#include "gf100.h"
+
+struct gf100_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct gf100_fifo *fifo;
+
+ struct list_head head;
+ bool killed;
+
+#define GF100_FIFO_ENGN_GR 0
+#define GF100_FIFO_ENGN_MSPDEC 1
+#define GF100_FIFO_ENGN_MSPPP 2
+#define GF100_FIFO_ENGN_MSVLD 3
+#define GF100_FIFO_ENGN_CE0 4
+#define GF100_FIFO_ENGN_CE1 5
+#define GF100_FIFO_ENGN_SW 15
+ struct gf100_fifo_engn {
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vma *vma;
+ } engn[NVKM_FIFO_ENGN_NR];
+};
+
+extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 000000000..9713daee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GK104_FIFO_CHAN_H__
+#define __GK104_FIFO_CHAN_H__
+#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
+#include "chan.h"
+#include "gk104.h"
+
+struct gk104_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct gk104_fifo *fifo;
+ int runl;
+
+ struct nvkm_fifo_cgrp *cgrp;
+ struct list_head head;
+ bool killed;
+
+#define GK104_FIFO_ENGN_SW 15
+ struct gk104_fifo_engn {
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vma *vma;
+ } engn[NVKM_FIFO_ENGN_NR];
+};
+
+extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
+
+int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
+void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
+void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
+struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
+int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
+ struct nvkm_object *);
+void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
+ struct nvkm_engine *);
+int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *);
+int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
+
+int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
+ struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
+ u64 *, bool, u32 *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
+ struct nvkm_engine *);
+int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
+ struct nvkm_engine *, bool);
+
+int tu102_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 000000000..727bc8976
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV04_FIFO_CHAN_H__
+#define __NV04_FIFO_CHAN_H__
+#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
+#include "chan.h"
+#include "nv04.h"
+
+struct nv04_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct nv04_fifo *fifo;
+ u32 ramfc;
+#define NV04_FIFO_ENGN_SW 0
+#define NV04_FIFO_ENGN_GR 1
+#define NV04_FIFO_ENGN_MPEG 2
+#define NV04_FIFO_ENGN_DMA 3
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
+};
+
+extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
+void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
+
+extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 000000000..c44d7c81d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+static int
+nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : return -1;
+ case NVKM_ENGINE_GR : return 0x0000;
+ case NVKM_ENGINE_MPEG : return 0x0060;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+struct nvkm_gpuobj **
+nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
+static int
+nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int offset, ret = 0;
+ u32 me;
+
+ offset = nv50_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and well,
+ * there's also a "ignore these engines" bitmask reg we can use
+ * if we hit the issue there..
+ */
+ me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ if (suspend)
+ ret = -EBUSY;
+ }
+ nvkm_wr32(device, 0x00b860, me);
+
+ if (ret == 0) {
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ }
+
+ return ret;
+}
+
+static int
+nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
+ u64 limit, start;
+ int offset;
+
+ offset = nv50_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+ limit = engn->addr + engn->size - 1;
+ start = engn->addr;
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+ nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+void
+nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine));
+}
+
+static int
+nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+
+ if (nv50_fifo_chan_engine_addr(engine) < 0)
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
+}
+
+void
+nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ u32 handle = object->handle;
+ u32 context;
+
+ switch (object->engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context = 0x00000000; break;
+ case NVKM_ENGINE_GR : context = 0x00100000; break;
+ case NVKM_ENGINE_MPEG : context = 0x00200000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+void
+nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 chid = chan->base.chid;
+
+ /* remove channel from runlist, fifo will unload context */
+ nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+ nv50_fifo_runlist_update(fifo);
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
+}
+
+static void
+nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = chan->ramfc->addr >> 12;
+ u32 chid = chan->base.chid;
+
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+ nv50_fifo_runlist_update(fifo);
+}
+
+void *
+nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_ramht_del(&chan->ramht);
+ nvkm_gpuobj_del(&chan->pgd);
+ nvkm_gpuobj_del(&chan->eng);
+ nvkm_gpuobj_del(&chan->cache);
+ nvkm_gpuobj_del(&chan->ramfc);
+ return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+nv50_fifo_chan_func = {
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv50_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .engine_ctor = nv50_fifo_chan_engine_ctor,
+ .engine_dtor = nv50_fifo_chan_engine_dtor,
+ .engine_init = nv50_fifo_chan_engine_init,
+ .engine_fini = nv50_fifo_chan_engine_fini,
+ .object_ctor = nv50_fifo_chan_object_ctor,
+ .object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
+ const struct nvkm_oclass *oclass,
+ struct nv50_fifo_chan *chan)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret;
+
+ if (!vmm)
+ return -EINVAL;
+
+ ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
+ 0x10000, 0x1000, false, vmm, push,
+ BIT(NV50_FIFO_ENGN_SW) |
+ BIT(NV50_FIFO_ENGN_GR) |
+ BIT(NV50_FIFO_ENGN_MPEG) |
+ BIT(NV50_FIFO_ENGN_DMA),
+ 0, 0xc00000, 0x2000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
+ &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+ &chan->pgd);
+ if (ret)
+ return ret;
+
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+}
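Aside, not part of the patch: the six words nv50_fifo_chan_engine_init() (and its g84 counterpart) writes into the engine context describe the backing object's address range: a flags word, the low 32 bits of the limit (last valid byte) and of the start, and one word carrying the high bits of both. A small helper showing the same packing; the function name is invented for the example:

#include <stdint.h>

static void
nv50_ctx_words(uint64_t start, uint64_t size, uint32_t w[6])
{
	uint64_t limit = start + size - 1;      /* last valid byte */

	w[0] = 0x00190000;                      /* flags word written first */
	w[1] = (uint32_t)limit;                 /* limit, low 32 bits */
	w[2] = (uint32_t)start;                 /* start, low 32 bits */
	w[3] = ((uint32_t)(limit >> 32) << 24) |/* limit high bits, top byte */
	       (uint32_t)(start >> 32);         /* start high bits below */
	w[4] = 0x00000000;
	w[5] = 0x00000000;
}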
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 000000000..3a95730d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV50_FIFO_CHAN_H__
+#define __NV50_FIFO_CHAN_H__
+#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
+#include "chan.h"
+#include "nv50.h"
+
+struct nv50_fifo_chan {
+ struct nv50_fifo *fifo;
+ struct nvkm_fifo_chan base;
+
+ struct nvkm_gpuobj *ramfc;
+ struct nvkm_gpuobj *cache;
+ struct nvkm_gpuobj *eng;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_ramht *ramht;
+
+#define NV50_FIFO_ENGN_SW 0
+#define NV50_FIFO_ENGN_GR 1
+#define NV50_FIFO_ENGN_MPEG 2
+#define NV50_FIFO_ENGN_DMA 3
+
+#define G84_FIFO_ENGN_SW 0
+#define G84_FIFO_ENGN_GR 1
+#define G84_FIFO_ENGN_MPEG 2
+#define G84_FIFO_ENGN_MSPPP 2
+#define G84_FIFO_ENGN_ME 3
+#define G84_FIFO_ENGN_CE0 3
+#define G84_FIFO_ENGN_VP 4
+#define G84_FIFO_ENGN_MSPDEC 4
+#define G84_FIFO_ENGN_CIPHER 5
+#define G84_FIFO_ENGN_SEC 5
+#define G84_FIFO_ENGN_VIC 5
+#define G84_FIFO_ENGN_BSP 6
+#define G84_FIFO_ENGN_MSVLD 6
+#define G84_FIFO_ENGN_DMA 7
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
+};
+
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
+ const struct nvkm_oclass *, struct nv50_fifo_chan *);
+void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
+
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
+ const struct nvkm_oclass *, struct nv50_fifo_chan *);
+
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 000000000..dbcdc5fab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/cl006b.h>
+#include <nvif/unpack.h>
+
+void
+nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+ mutex_lock(&chan->fifo->base.mutex);
+ nvkm_ramht_remove(imem->ramht, cookie);
+ mutex_unlock(&chan->fifo->base.mutex);
+}
+
+static int
+nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+ u32 context = 0x80000000 | chan->base.chid << 24;
+ u32 handle = object->handle;
+ int hash;
+
+ switch (object->engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context |= 0x00000000; break;
+ case NVKM_ENGINE_GR : context |= 0x00010000; break;
+ case NVKM_ENGINE_MPEG : context |= 0x00020000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chan->fifo->base.mutex);
+ hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+ handle, context);
+ mutex_unlock(&chan->fifo->base.mutex);
+ return hash;
+}
+
+void
+nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_memory *fctx = device->imem->ramfc;
+ const struct nv04_fifo_ramfc *c;
+ unsigned long flags;
+ u32 mask = fifo->base.nr - 1;
+ u32 data = chan->ramfc;
+ u32 chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
+ if (chid == chan->base.chid) {
+ nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ c = fifo->ramfc;
+ nvkm_kmap(fctx);
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
+ u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+ nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+ nvkm_done(fctx);
+
+ c = fifo->ramfc;
+ do {
+ nvkm_wr32(device, c->regp, 0x00000000);
+ } while ((++c)->bits);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ /* restore normal operation, after disabling dma mode */
+ nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void
+nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = 1 << chan->base.chid;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void *
+nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
+ const struct nv04_fifo_ramfc *c = fifo->ramfc;
+
+ nvkm_kmap(imem->ramfc);
+ do {
+ nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+ nvkm_done(imem->ramfc);
+ return chan;
+}
+
+const struct nvkm_fifo_chan_func
+nv04_fifo_dma_func = {
+ .dtor = nv04_fifo_dma_dtor,
+ .init = nv04_fifo_dma_init,
+ .fini = nv04_fifo_dma_fini,
+ .object_ctor = nv04_fifo_dma_object_ctor,
+ .object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 32;
+
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+ .base.oclass = NV03_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv04_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 000000000..07d80d54a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/cl006b.h>
+#include <nvif/unpack.h>
+
+static int
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 32;
+
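+	/* As NV04, but the NV10 RAMFC layout moves the pushbuf instance to
+	 * +0x0c and the fetch parameters to +0x14.
+	 */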
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+ .base.oclass = NV10_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv10_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 000000000..edd70a114
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/cl006b.h>
+#include <nvif/unpack.h>
+
+static int
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) | /* NV31- */
+ BIT(NV04_FIFO_ENGN_DMA),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 64;
+
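+	/* NV17 grows the RAMFC entry to 64 bytes; the pushbuf instance and
+	 * fetch parameters keep their NV10 offsets.
+	 */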
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+ .base.oclass = NV17_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv17_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 000000000..0411fb908
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/cl006b.h>
+#include <nvif/unpack.h>
+
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW:
+ return false;
+ case NVKM_ENGINE_GR:
+ *reg = 0x0032e0;
+ *ctx = 0x38;
+ return true;
+ case NVKM_ENGINE_MPEG:
+ if (engine->subdev.device->chipset < 0x44)
+ return false;
+ *reg = 0x00330c;
+ *ctx = 0x54;
+ return true;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+}
+
+static struct nvkm_gpuobj **
+nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
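+/* NV40 keeps per-engine context pointers both in a live PFIFO register (for
+ * whichever channel is currently resident in CACHE1) and in each channel's
+ * RAMFC, so the init/fini hooks below update both copies, bracketing the
+ * update by clearing and restoring bit 0 of 0x002500.
+ */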
+static int
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ unsigned long flags;
+ u32 reg, ctx;
+ int chid;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+ chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+ if (chid == chan->base.chid)
+ nvkm_wr32(device, reg, 0x00000000);
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+ nvkm_done(imem->ramfc);
+
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+ return 0;
+}
+
+static int
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ unsigned long flags;
+ u32 inst, reg, ctx;
+ int chid;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+ inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+ chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+ if (chid == chan->base.chid)
+ nvkm_wr32(device, reg, inst);
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
+ nvkm_done(imem->ramfc);
+
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+ return 0;
+}
+
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine));
+}
+
+static int
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ u32 reg, ctx;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine));
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+ u32 context = chan->base.chid << 23;
+ u32 handle = object->handle;
+ int hash;
+
+ switch (object->engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context |= 0x00000000; break;
+ case NVKM_ENGINE_GR : context |= 0x00100000; break;
+ case NVKM_ENGINE_MPEG : context |= 0x00200000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chan->fifo->base.mutex);
+ hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+ handle, context);
+ mutex_unlock(&chan->fifo->base.mutex);
+ return hash;
+}
+
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+ .dtor = nv04_fifo_dma_dtor,
+ .init = nv04_fifo_dma_init,
+ .fini = nv04_fifo_dma_fini,
+ .engine_ctor = nv40_fifo_dma_engine_ctor,
+ .engine_dtor = nv40_fifo_dma_engine_dtor,
+ .engine_init = nv40_fifo_dma_engine_init,
+ .engine_fini = nv40_fifo_dma_engine_fini,
+ .object_ctor = nv40_fifo_dma_object_ctor,
+ .object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) |
+ BIT(NV04_FIFO_ENGN_DMA),
+ 0, 0xc00000, 0x1000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 128;
+
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+ .base.oclass = NV40_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv40_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
new file mode 100644
index 000000000..3885c3830
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "channv50.h"
+
+static void
+g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
+}
+
+static void
+g84_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
+}
+
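+/* Several engine slots are shared between the original G84-era engines and
+ * their successors (MPEG/MSPPP, ME/CE, VP/MSPDEC, CIPHER/SEC/VIC, BSP/MSVLD),
+ * so the newer engine is returned when the device exposes it.
+ */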
+static struct nvkm_engine *
+g84_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_engine *engine;
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case G84_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case G84_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case G84_FIFO_ENGN_MPEG :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPPP, 0)))
+ return engine;
+ type = NVKM_ENGINE_MPEG;
+ break;
+ case G84_FIFO_ENGN_ME :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_CE, 0)))
+ return engine;
+ type = NVKM_ENGINE_ME;
+ break;
+ case G84_FIFO_ENGN_VP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPDEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_VP;
+ break;
+ case G84_FIFO_ENGN_CIPHER:
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_VIC, 0)))
+ return engine;
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_SEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_CIPHER;
+ break;
+ case G84_FIFO_ENGN_BSP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSVLD, 0)))
+ return engine;
+ type = NVKM_ENGINE_BSP;
+ break;
+ case G84_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+static int
+g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return G84_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return G84_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return G84_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_CE : return G84_FIFO_ENGN_CE0;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return G84_FIFO_ENGN_VP;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return G84_FIFO_ENGN_CIPHER;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return G84_FIFO_ENGN_BSP;
+ case NVKM_ENGINE_DMAOBJ: return G84_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+static const struct nvkm_fifo_func
+g84_fifo = {
+ .dtor = nv50_fifo_dtor,
+ .oneinit = nv50_fifo_oneinit,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = g84_fifo_engine_id,
+ .id_engine = g84_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .uevent_init = g84_fifo_uevent_init,
+ .uevent_fini = g84_fifo_uevent_fini,
+ .chan = {
+ &g84_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv50_fifo_new_(&g84_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644
index 000000000..c630dbd29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+ struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+ struct nvkm_object object;
+
+ struct {
+ u32 runl;
+ u32 chan;
+ } ctrl;
+
+ struct nvkm_memory *mthd;
+ struct nvkm_memory *inst;
+ struct nvkm_memory *user;
+ struct nvkm_memory *runl;
+
+ struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+ if (index == 0) {
+ oclass->ctor = nvkm_object_new;
+ oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+ u64 bar2 = nvkm_memory_bar2(chan->user);
+
+ if (bar2 == ~0ULL)
+ return -EFAULT;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = device->func->resource_addr(device, 3) + bar2;
+ *size = 0x1000;
+ return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+ break;
+ );
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+ return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+ nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+ return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst);
+ nvkm_vmm_unref(&chan->vmm);
+ }
+
+ nvkm_memory_unref(&chan->runl);
+ nvkm_memory_unref(&chan->user);
+ nvkm_memory_unref(&chan->inst);
+ nvkm_memory_unref(&chan->mthd);
+ return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+ .dtor = ga102_chan_dtor,
+ .init = ga102_chan_init,
+ .fini = ga102_chan_fini,
+ .map = ga102_chan_map,
+ .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ struct volta_channel_gpfifo_a_v0 *args = argv;
+ struct nvkm_top_device *tdev;
+ struct nvkm_vmm *vmm;
+ struct ga102_chan *chan;
+ int ret;
+
+ if (argc != sizeof(*args))
+ return -ENOSYS;
+
+ vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+ *pobject = &chan->object;
+
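+	/* Use the first copy-engine runlist advertised by PTOP; the channel
+	 * control area and usermode doorbell token are then read from that
+	 * runlist's own registers.
+	 */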
+ list_for_each_entry(tdev, &device->top->device, head) {
+ if (tdev->type == NVKM_ENGINE_CE) {
+ chan->ctrl.runl = tdev->runlist;
+ break;
+ }
+ }
+
+ if (!chan->ctrl.runl)
+ return -ENODEV;
+
+ chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+ args->chid = 0;
+ args->inst = 0;
+ args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+ if (ret)
+ return ret;
+
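+	/* Fill in a minimal channel instance block: the GP FIFO base and
+	 * size (log2 of the entry count) at +0x48/+0x4c, the BAR2 address of
+	 * the chan->mthd buffer, and otherwise fixed defaults sufficient to
+	 * drive the copy engine.
+	 */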
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+ nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+ (order_base_2(args->ilength / 8) << 16));
+ nvkm_wo32(chan->inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->inst, 0x094, 0x30000001);
+ nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+ nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+ nvkm_wo32(chan->inst, 0x0e8, 0);
+ nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+ nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+ nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_done(chan->inst);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->runl);
+ nvkm_wo32(chan->runl, 0x00, 0x80030001);
+ nvkm_wo32(chan->runl, 0x04, 1);
+ nvkm_wo32(chan->runl, 0x08, 0);
+ nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+ nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_done(chan->runl);
+
+ ret = nvkm_vmm_join(vmm, chan->inst);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
+ return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+ .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+ .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+ if (index == 0) {
+ oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+ *class = &ga102_user_oclass;
+ return 0;
+ } else
+ if (index == 1) {
+ oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+ *class = &ga102_chan_oclass;
+ return 0;
+ }
+
+ return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+ switch (mthd) {
+ case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+ return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+ .dtor = ga102_fifo_dtor,
+ .info = ga102_fifo_info,
+ .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ struct ga102_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+ *pfifo = &fifo->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
new file mode 100644
index 000000000..8b4f36b3e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gf100.h"
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/enum.h>
+#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <engine/sw.h>
+
+#include <nvif/class.h>
+
+static void
+gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
+}
+
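+/* Rebuild and submit the runlist.  Two buffers are alternated so a new list
+ * can be written while the hardware may still be reading the previous one;
+ * the new list is submitted via 0x002270/0x002274 and completion is signalled
+ * by the runlist interrupt (see gf100_fifo_intr_runlist).
+ */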
+void
+gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
+{
+ struct gf100_fifo_chan *chan;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_memory *cur;
+ int nr = 0;
+ int target;
+
+ mutex_lock(&fifo->base.mutex);
+ cur = fifo->runlist.mem[fifo->runlist.active];
+ fifo->runlist.active = !fifo->runlist.active;
+
+ nvkm_kmap(cur);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+ nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+ nr++;
+ }
+ nvkm_done(cur);
+
+ switch (nvkm_memory_target(cur)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ mutex_unlock(&fifo->base.mutex);
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
+ (target << 28));
+ nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
+
+ if (wait_event_timeout(fifo->runlist.wait,
+ !(nvkm_rd32(device, 0x00227c) & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
+ nvkm_error(subdev, "runlist update timeout\n");
+ mutex_unlock(&fifo->base.mutex);
+}
+
+void
+gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
+{
+ mutex_lock(&fifo->base.mutex);
+ list_del_init(&chan->head);
+ mutex_unlock(&fifo->base.mutex);
+}
+
+void
+gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
+{
+ mutex_lock(&fifo->base.mutex);
+ list_add_tail(&chan->head, &fifo->chan);
+ mutex_unlock(&fifo->base.mutex);
+}
+
+static struct nvkm_engine *
+gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ enum nvkm_subdev_type type;
+ int inst;
+
+ switch (engi) {
+ case GF100_FIFO_ENGN_GR : type = NVKM_ENGINE_GR ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE0 : type = NVKM_ENGINE_CE ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE1 : type = NVKM_ENGINE_CE ; inst = 1; break;
+ case GF100_FIFO_ENGN_SW : type = NVKM_ENGINE_SW ; inst = 0; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, inst);
+}
+
+static int
+gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_GR : return GF100_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC;
+ case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP;
+ case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD;
+ case NVKM_ENGINE_CE : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst;
+ case NVKM_ENGINE_SW : return GF100_FIFO_ENGN_SW;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+static void
+gf100_fifo_recover_work(struct work_struct *w)
+{
+ struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, engn, todo;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ engm = fifo->recover.mask;
+ fifo->recover.mask = 0ULL;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+ nvkm_mask(device, 0x002630, engm, engm);
+
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) {
+ if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
+ gf100_fifo_runlist_commit(fifo);
+ nvkm_wr32(device, 0x00262c, engm);
+ nvkm_mask(device, 0x002630, engm, 0x00000000);
+}
+
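+/* Remove a faulting channel from service: disable it, drop it from the
+ * runlist and flag its engine for the recovery worker, which resets the
+ * affected engine(s) and commits a fresh runlist.
+ */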
+static void
+gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
+ struct gf100_fifo_chan *chan)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 chid = chan->base.chid;
+ int engi = gf100_fifo_engine_id(&fifo->base, engine);
+
+ nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+ engine->subdev.name, chid);
+ assert_spin_locked(&fifo->base.lock);
+
+ nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+ list_del_init(&chan->head);
+ chan->killed = true;
+
+ if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
+ fifo->recover.mask |= BIT(engi);
+ schedule_work(&fifo->recover.work);
+ nvkm_fifo_kevent(&fifo->base, chid);
+}
+
+static const struct nvkm_enum
+gf100_fifo_fault_engine[] = {
+ { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+ { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
+ { 0x13, "PCOUNTER" },
+ { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
+ { 0x17, "PMU" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_fault_hubclient[] = {
+ { 0x01, "PCOPY0" },
+ { 0x02, "PCOPY1" },
+ { 0x04, "DISPATCH" },
+ { 0x05, "CTXCTL" },
+ { 0x06, "PFIFO" },
+ { 0x07, "BAR_READ" },
+ { 0x08, "BAR_WRITE" },
+ { 0x0b, "PVP" },
+ { 0x0c, "PMSPPP" },
+ { 0x0d, "PMSVLD" },
+ { 0x11, "PCOUNTER" },
+ { 0x12, "PMU" },
+ { 0x14, "CCACHE" },
+ { 0x15, "CCACHE_POST" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_fault_gpcclient[] = {
+ { 0x01, "TEX" },
+ { 0x0c, "ESETUP" },
+ { 0x0e, "CTXCTL" },
+ { 0x0f, "PROP" },
+ {}
+};
+
+static void
+gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *eu, *ec;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char gpcid[8] = "";
+
+ er = nvkm_enum_find(gf100_fifo_fault_reason, info->reason);
+ eu = nvkm_enum_find(gf100_fifo_fault_engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(gf100_fifo_fault_hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, info->client);
+ snprintf(gpcid, sizeof(gpcid), "GPC%d/", info->gpc);
+ }
+
+ if (eu && eu->data2) {
+ switch (eu->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, eu->data2, eu->inst);
+ break;
+ }
+ }
+
+ chan = nvkm_fifo_chan_inst(&fifo->base, info->inst, &flags);
+
+ nvkm_error(subdev,
+ "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+ "reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access ? "write" : "read", info->addr,
+ info->engine, eu ? eu->name : "",
+ info->client, gpcid, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ if (engine && chan)
+ gf100_fifo_recover(fifo, engine, (void *)chan);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+}
+
+static const struct nvkm_enum
+gf100_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static void
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ struct gf100_fifo_chan *chan;
+ unsigned long flags;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ for (engn = 0; engn < 6; engn++) {
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
+ u32 busy = (stat & 0x80000000);
+ u32 save = (stat & 0x00100000); /* maybe? */
+ u32 unk0 = (stat & 0x00040000);
+ u32 unk1 = (stat & 0x00001000);
+ u32 chid = (stat & 0x0000007f);
+ (void)save;
+
+ if (busy && unk0 && unk1) {
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->base.chid == chid) {
+ engine = gf100_fifo_id_engine(&fifo->base, engn);
+ if (!engine)
+ break;
+ gf100_fifo_recover(fifo, engine, chan);
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en;
+
+ en = nvkm_enum_find(gf100_fifo_sched_reason, code);
+
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
+
+ switch (code) {
+ case 0x0a:
+ gf100_fifo_intr_sched_ctxsw(fifo);
+ break;
+ default:
+ break;
+ }
+}
+
+void
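+/* Decode an MMU fault latched by PFIFO for the given fault unit: the
+ * faulting instance, virtual address and fault type are repacked into
+ * nvkm_fault_data for the common fault handler.
+ */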
+gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00001f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+static const struct nvkm_bitfield
+gf100_fifo_pbdma_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
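+/* PBDMA interrupt: software method traps are first offered to the SW engine;
+ * anything left unhandled is logged with the offending channel, subchannel,
+ * method and data before the interrupt is acknowledged.
+ */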
+static void
+gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+ u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+ u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+	u32 show = stat;
+ char msg[128];
+
+ if (stat & 0x00800000) {
+ if (device->sw) {
+ if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+ show &= ~0x00800000;
+ }
+ }
+
+ if (show) {
+ nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n",
+ unit, show, msg, chid, chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+ }
+
+ nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x002a00);
+
+ if (intr & 0x10000000) {
+ wake_up(&fifo->runlist.wait);
+ nvkm_wr32(device, 0x002a00, 0x10000000);
+ intr &= ~0x10000000;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "RUNLIST %08x\n", intr);
+ nvkm_wr32(device, 0x002a00, intr);
+ }
+}
+
+static void
+gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+ u32 inte = nvkm_rd32(device, 0x002628);
+ u32 unkn;
+
+ nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
+
+ for (unkn = 0; unkn < 8; unkn++) {
+ u32 ints = (intr >> (unkn * 0x04)) & inte;
+ if (ints & 0x1) {
+ nvkm_fifo_uevent(&fifo->base);
+ ints &= ~1;
+ }
+ if (ints) {
+			nvkm_error(subdev, "ENGINE %d %d %01x\n",
+				   engn, unkn, ints);
+ nvkm_mask(device, 0x002628, ints, 0);
+ }
+ }
+}
+
+void
+gf100_fifo_intr_engine(struct gf100_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = nvkm_rd32(device, 0x0025a4);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ gf100_fifo_intr_engine_unit(fifo, unit);
+ mask &= ~(1 << unit);
+ }
+}
+
+static void
+gf100_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ u32 intr = nvkm_rd32(device, 0x00252c);
+ nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000100) {
+ gf100_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ u32 intr = nvkm_rd32(device, 0x00256c);
+ nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x01000000) {
+ u32 intr = nvkm_rd32(device, 0x00258c);
+ nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x10000000) {
+ u32 mask = nvkm_rd32(device, 0x00259c);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ gf100_fifo_intr_fault(&fifo->base, unit);
+ nvkm_wr32(device, 0x00259c, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ gf100_fifo_intr_pbdma(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gf100_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ gf100_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+static int
+gf100_fifo_oneinit(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ int ret;
+
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x002204, 0xffffffff);
+ fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
+ nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+ false, &fifo->runlist.mem[0]);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+ false, &fifo->runlist.mem[1]);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&fifo->runlist.wait);
+
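+	/* One 4KiB user control page per channel (128 channels), mapped
+	 * through BAR1 so channels can expose it directly to clients.
+	 */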
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+ 0x1000, false, &fifo->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
+}
+
+static void
+gf100_fifo_fini(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ flush_work(&fifo->recover.work);
+}
+
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
+
+ /* Enable PBDMAs. */
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+ nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);
+
+ /* Assign engines to PBDMAs. */
+ if (fifo->pbdma_nr >= 3) {
+ nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
+ nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+ nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+ nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PBDMA[n] */
+ for (i = 0; i < fifo->pbdma_nr; i++) {
+ nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
+
+ nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
+ nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
+}
+
+static void *
+gf100_fifo_dtor(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
+ nvkm_memory_unref(&fifo->runlist.mem[0]);
+ nvkm_memory_unref(&fifo->runlist.mem[1]);
+ return fifo;
+}
+
+static const struct nvkm_fifo_func
+gf100_fifo = {
+ .dtor = gf100_fifo_dtor,
+ .oneinit = gf100_fifo_oneinit,
+ .init = gf100_fifo_init,
+ .fini = gf100_fifo_fini,
+ .intr = gf100_fifo_intr,
+ .fault = gf100_fifo_fault,
+ .engine_id = gf100_fifo_engine_id,
+ .id_engine = gf100_fifo_id_engine,
+ .uevent_init = gf100_fifo_uevent_init,
+ .uevent_fini = gf100_fifo_uevent_fini,
+ .chan = {
+ &gf100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ struct gf100_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fifo->chan);
+ INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 000000000..b8642490e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GF100_FIFO_H__
+#define __GF100_FIFO_H__
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gf100_fifo_chan;
+struct gf100_fifo {
+ struct nvkm_fifo base;
+
+ struct list_head chan;
+
+ struct {
+ struct work_struct work;
+ u64 mask;
+ } recover;
+
+ int pbdma_nr;
+
+ struct {
+ struct nvkm_memory *mem[2];
+ int active;
+ wait_queue_head_t wait;
+ } runlist;
+
+ struct {
+ struct nvkm_memory *mem;
+ struct nvkm_vma *bar;
+ } user;
+};
+
+void gf100_fifo_intr_engine(struct gf100_fifo *);
+void gf100_fifo_runlist_insert(struct gf100_fifo *, struct gf100_fifo_chan *);
+void gf100_fifo_runlist_remove(struct gf100_fifo *, struct gf100_fifo_chan *);
+void gf100_fifo_runlist_commit(struct gf100_fifo *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
new file mode 100644
index 000000000..e771bd519
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -0,0 +1,1249 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+#include <engine/sw.h>
+
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
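+/* Decode the per-engine status word (0x002640 + engn * 8): whether the
+ * engine is busy or faulted, whether a context switch is in progress, and
+ * which channel/TSG is loaded (or being switched to/from).
+ */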
+void
+gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status)
+{
+ struct nvkm_engine *engine = fifo->engine[engn].engine;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
+
+ status->busy = !!(stat & 0x80000000);
+ status->faulted = !!(stat & 0x40000000);
+ status->next.tsg = !!(stat & 0x10000000);
+ status->next.id = (stat & 0x0fff0000) >> 16;
+ status->chsw = !!(stat & 0x00008000);
+ status->save = !!(stat & 0x00004000);
+ status->load = !!(stat & 0x00002000);
+ status->prev.tsg = !!(stat & 0x00001000);
+ status->prev.id = (stat & 0x00000fff);
+ status->chan = NULL;
+
+ if (status->busy && status->chsw) {
+ if (status->load && status->save) {
+ if (engine && nvkm_engine_chsw_load(engine))
+ status->chan = &status->next;
+ else
+ status->chan = &status->prev;
+ } else
+ if (status->load) {
+ status->chan = &status->next;
+ } else {
+ status->chan = &status->prev;
+ }
+ } else
+ if (status->load) {
+ status->chan = &status->prev;
+ }
+
+ nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
+ "save %d load %d %sid %d%s-> %sid %d%s\n",
+ engn, status->busy, status->faulted,
+ status->chsw, status->save, status->load,
+ status->prev.tsg ? "tsg" : "ch", status->prev.id,
+ status->chan == &status->prev ? "*" : " ",
+ status->next.tsg ? "tsg" : "ch", status->next.id,
+ status->chan == &status->next ? "*" : " ");
+}
+
+int
+gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ if (oclass->engn == &fifo->func->chan) {
+ const struct gk104_fifo_chan_user *user = oclass->engn;
+ return user->ctor(fifo, oclass, argv, argc, pobject);
+ } else
+ if (oclass->engn == &fifo->func->user) {
+ const struct gk104_fifo_user_user *user = oclass->engn;
+ return user->ctor(oclass, argv, argc, pobject);
+ }
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+int
+gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int c = 0;
+
+ if (fifo->func->user.ctor && c++ == index) {
+ oclass->base = fifo->func->user.user;
+ oclass->engn = &fifo->func->user;
+ return 0;
+ }
+
+ if (fifo->func->chan.ctor && c++ == index) {
+ oclass->base = fifo->func->chan.user;
+ oclass->engn = &fifo->func->chan;
+ return 0;
+ }
+
+ return c;
+}
+
+void
+gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
+}
+
+void
+gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
+}
+
+void
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int target;
+
+ switch (nvkm_memory_target(mem)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+ (target << 28));
+ nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "runlist %d update timeout\n", runl);
+}
+
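+/* Rebuild a runlist: bare channels first, then each channel group as a
+ * group header entry followed by its member channels, using the per-chipset
+ * entry writers from fifo->func->runlist.
+ */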
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
+{
+ const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
+ struct gk104_fifo_chan *chan;
+ struct nvkm_memory *mem;
+ struct nvkm_fifo_cgrp *cgrp;
+ int nr = 0;
+
+ mutex_lock(&fifo->base.mutex);
+ mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
+ fifo->runlist[runl].next = !fifo->runlist[runl].next;
+
+ nvkm_kmap(mem);
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ func->chan(chan, mem, nr++ * func->size);
+ }
+
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ func->cgrp(cgrp, mem, nr++ * func->size);
+ list_for_each_entry(chan, &cgrp->chan, head) {
+ func->chan(chan, mem, nr++ * func->size);
+ }
+ }
+ nvkm_done(mem);
+
+ func->commit(fifo, runl, mem, nr);
+ mutex_unlock(&fifo->base.mutex);
+}
+
+void
+gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
+{
+ struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
+ mutex_lock(&fifo->base.mutex);
+ if (!list_empty(&chan->head)) {
+ list_del_init(&chan->head);
+ if (cgrp && !--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ }
+ mutex_unlock(&fifo->base.mutex);
+}
+
+void
+gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
+{
+ struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
+ mutex_lock(&fifo->base.mutex);
+ if (cgrp) {
+ if (!cgrp->chan_nr++)
+ list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
+ list_add_tail(&chan->head, &cgrp->chan);
+ } else {
+ list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
+ }
+ mutex_unlock(&fifo->base.mutex);
+}
+
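+/* A bare channel runlist entry is 8 bytes: word 0 holds the channel ID,
+ * word 1 is zero.
+ */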
+void
+gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+ struct nvkm_memory *memory, u32 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->base.chid);
+ nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+const struct gk104_fifo_runlist_func
+gk104_fifo_runlist = {
+ .size = 8,
+ .chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+void
+gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+}
+
+int
+gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ return hweight32(nvkm_rd32(device, 0x000204));
+}
+
+const struct gk104_fifo_pbdma_func
+gk104_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+};
+
+struct nvkm_engine *
+gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
+{
+ if (engi == GK104_FIFO_ENGN_SW)
+ return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0);
+
+ return gk104_fifo(base)->engine[engi].engine;
+}
+
+int
+gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int engn;
+
+ if (engine->subdev.type == NVKM_ENGINE_SW)
+ return GK104_FIFO_ENGN_SW;
+
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine)
+ return engn;
+ }
+
+ WARN_ON(1);
+ return -1;
+}
+
+static void
+gk104_fifo_recover_work(struct work_struct *w)
+{
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, runm, todo;
+ int engn, runl;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
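+ /* Keep the affected runlists blocked (same register as in
+ * gk104_fifo_recover_runl()) while engines are reset and the runlists
+ * rebuilt; they're unblocked again below.
+ */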
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_update(fifo, runl);
+
+ nvkm_wr32(device, 0x00262c, runm);
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runm = BIT(runl);
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
+
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+ struct gk104_fifo_chan *chan;
+ struct nvkm_fifo_cgrp *cgrp;
+
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ return chan;
+ }
+ }
+
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ if (cgrp->id == chid) {
+ chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+ list_del_init(&chan->head);
+ if (!--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ return chan;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Look up SW state for the channel, and mark it as dead. */
+ chan = gk104_fifo_recover_chid(fifo, runl, chid);
+ if (chan) {
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
+ }
+
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ gk104_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ gk104_fifo_recover_engn(fifo, engn);
+ }
+}
+
+static void
+gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_engine *engine = fifo->engine[engn].engine;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+ int mmui = -1;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ gk104_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+ /* The channel is no longer viable, kill it. */
+ gk104_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Determine MMU fault ID for the engine, if we're not being
+ * called from the fault handler already.
+ */
+ if (!status.faulted && engine) {
+ mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
+ if (mmui < 0) {
+ const struct nvkm_enum *en = fifo->func->fault.engine;
+ for (; en && en->name; en++) {
+ if (en->data2 == engine->subdev.type &&
+ en->inst == engine->subdev.inst) {
+ mmui = en->value;
+ break;
+ }
+ }
+ }
+ WARN_ON(mmui < 0);
+ }
+
+ /* Trigger an MMU fault for the engine.
+ *
+ * It's not entirely clear why this is needed, but nvgpu does something
+ * similar, and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
+ */
+ if (mmui >= 0) {
+ nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);
+
+ /* Wait for fault to trigger. */
+ nvkm_msec(device, 2000,
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.faulted)
+ break;
+ );
+
+ /* Release MMU fault trigger, and ACK the fault. */
+ nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x00259c, BIT(mmui));
+ nvkm_wr32(device, 0x002100, 0x10000000);
+ }
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+ schedule_work(&fifo->recover.work);
+}
+
+static void
+gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *en = "";
+ char ct[8] = "HUB/";
+
+ er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, ee->data2, 0);
+ break;
+ }
+ }
+
+ if (ee == NULL) {
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ en = engine->subdev.name;
+ }
+ } else {
+ en = ee->name;
+ }
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : en,
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ gk104_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ if (engine) {
+ int engn = fifo->base.func->engine_id(&fifo->base, engine);
+ if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
+ gk104_fifo_recover_engn(fifo, engn);
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static const struct nvkm_enum
+gk104_fifo_bind_reason[] = {
+ { 0x01, "BIND_NOT_UNBOUND" },
+ { 0x02, "SNOOP_WITHOUT_BAR1" },
+ { 0x03, "UNBIND_WHILE_RUNNING" },
+ { 0x05, "INVALID_RUNLIST" },
+ { 0x06, "INVALID_CTX_TGT" },
+ { 0x0b, "UNBIND_WHILE_PARKED" },
+ {}
+};
+
+void
+gk104_fifo_intr_bind(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00252c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gk104_fifo_bind_reason, code);
+
+ nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
+}
+
+static const struct nvkm_enum
+gk104_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static void
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags, engm = 0;
+ u32 engn;
+
+ /* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
+ * as MMU_FAULT cannot be triggered while it's pending.
+ */
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+
+ for (engn = 0; engn < fifo->engine_nr; engn++) {
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.busy || !status.chsw)
+ continue;
+
+ engm |= BIT(engn);
+ }
+
+ for_each_set_bit(engn, &engm, fifo->engine_nr)
+ gk104_fifo_recover_engn(fifo, engn);
+
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+gk104_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gk104_fifo_sched_reason, code);
+
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
+
+ switch (code) {
+ case 0x0a:
+ gk104_fifo_intr_sched_ctxsw(fifo);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00256c);
+ nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+ nvkm_wr32(device, 0x00256c, stat);
+}
+
+void
+gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00259c);
+ nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
+}
+
+static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
+ { 0x00000001, "MEMREQ" },
+ { 0x00000002, "MEMACK_TIMEOUT" },
+ { 0x00000004, "MEMACK_EXTRA" },
+ { 0x00000008, "MEMDAT_TIMEOUT" },
+ { 0x00000010, "MEMDAT_EXTRA" },
+ { 0x00000020, "MEMFLUSH" },
+ { 0x00000040, "MEMOP" },
+ { 0x00000080, "LBCONNECT" },
+ { 0x00000100, "LBREQ" },
+ { 0x00000200, "LBACK_TIMEOUT" },
+ { 0x00000400, "LBACK_EXTRA" },
+ { 0x00000800, "LBDAT_TIMEOUT" },
+ { 0x00001000, "LBDAT_EXTRA" },
+ { 0x00002000, "GPFIFO" },
+ { 0x00004000, "GPPTR" },
+ { 0x00008000, "GPENTRY" },
+ { 0x00010000, "GPCRC" },
+ { 0x00020000, "PBPTR" },
+ { 0x00040000, "PBENTRY" },
+ { 0x00080000, "PBCRC" },
+ { 0x00100000, "XBARCONNECT" },
+ { 0x00200000, "METHOD" },
+ { 0x00400000, "METHODCRC" },
+ { 0x00800000, "DEVICE" },
+ { 0x02000000, "SEMAPHORE" },
+ { 0x04000000, "ACQUIRE" },
+ { 0x08000000, "PRI" },
+ { 0x20000000, "NO_CTXSW_SEG" },
+ { 0x40000000, "PBSEG" },
+ { 0x80000000, "SIGNATURE" },
+ {}
+};
+
+void
+gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+ u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+ u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char msg[128];
+
+ if (stat & 0x00800000) {
+ if (device->sw) {
+ if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+ show &= ~0x00800000;
+ }
+ }
+
+ nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+
+ if (show) {
+ nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n",
+ unit, show, msg, chid, chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+ }
+
+ nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
+}
+
+static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
+ { 0x00000001, "HCE_RE_ILLEGAL_OP" },
+ { 0x00000002, "HCE_RE_ALIGNB" },
+ { 0x00000004, "HCE_PRIV" },
+ { 0x00000008, "HCE_ILLEGAL_MTHD" },
+ { 0x00000010, "HCE_ILLEGAL_CLASS" },
+ {}
+};
+
+void
+gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+ char msg[128];
+
+ if (stat) {
+ nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+ unit, stat, msg, chid,
+ nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+ nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
+ }
+
+ nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
+}
+
+void
+gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = nvkm_rd32(device, 0x002a00);
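+ /* One bit per runlist: wake anyone waiting on that runlist's wait
+ * queue, and acknowledge the interrupt.
+ */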
+ while (mask) {
+ int runl = __ffs(mask);
+ wake_up(&fifo->runlist[runl].wait);
+ nvkm_wr32(device, 0x002a00, 1 << runl);
+ mask &= ~(1 << runl);
+ }
+}
+
+void
+gk104_fifo_intr_engine(struct gk104_fifo *fifo)
+{
+ nvkm_fifo_uevent(&fifo->base);
+}
+
+static void
+gk104_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000010) {
+ nvkm_error(subdev, "PIO_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000100) {
+ gk104_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x00800000) {
+ nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+ nvkm_wr32(device, 0x002100, 0x00800000);
+ stat &= ~0x00800000;
+ }
+
+ if (stat & 0x01000000) {
+ nvkm_error(subdev, "LB_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x08000000) {
+ gk104_fifo_intr_dropped_fault(fifo);
+ nvkm_wr32(device, 0x002100, 0x08000000);
+ stat &= ~0x08000000;
+ }
+
+ if (stat & 0x10000000) {
+ u32 mask = nvkm_rd32(device, 0x00259c);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ fifo->func->intr.fault(&fifo->base, unit);
+ nvkm_wr32(device, 0x00259c, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+ while (mask) {
+ u32 unit = __ffs(mask);
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+void
+gk104_fifo_fini(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ flush_work(&fifo->recover.work);
+ /* allow mmu fault interrupts, even when we're not using fifo */
+ nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
+}
+
+int
+gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ switch (mthd) {
+ case NV_DEVICE_HOST_RUNLISTS:
+ *data = (1ULL << fifo->runlist_nr) - 1;
+ return 0;
+ case NV_DEVICE_HOST_RUNLIST_ENGINES: {
+ if (*data < fifo->runlist_nr) {
+ unsigned long engm = fifo->runlist[*data].engm;
+ struct nvkm_engine *engine;
+ int engn;
+ *data = 0;
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ if ((engine = fifo->engine[engn].engine)) {
+#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
+ switch (engine->subdev.type) {
+ CASE(SW );
+ CASE(GR );
+ CASE(MPEG );
+ CASE(ME );
+ CASE(CIPHER);
+ CASE(BSP );
+ CASE(VP );
+ CASE(CE );
+ CASE(SEC );
+ CASE(MSVLD );
+ CASE(MSPDEC);
+ CASE(MSPPP );
+ CASE(MSENC );
+ CASE(VIC );
+ CASE(SEC2 );
+ CASE(NVDEC );
+ CASE(NVENC );
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+gk104_fifo_oneinit(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ struct nvkm_top_device *tdev;
+ int pbid, ret, i, j;
+ u32 *map;
+
+ fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
+ nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
+ /* Read PBDMA->runlist(s) mapping from HW. */
+ if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
+ return -ENOMEM;
+
+ for (i = 0; i < fifo->pbdma_nr; i++)
+ map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
+
+ /* Determine runlist configuration from topology device info. */
+ list_for_each_entry(tdev, &device->top->device, head) {
+ const int engn = tdev->engine;
+ char _en[16], *en;
+
+ if (engn < 0)
+ continue;
+
+ /* Determine which PBDMA handles requests for this engine. */
+ for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
+ if (map[j] & BIT(tdev->runlist)) {
+ pbid = j;
+ break;
+ }
+ }
+
+ fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
+ if (!fifo->engine[engn].engine) {
+ snprintf(_en, sizeof(_en), "%s, %d",
+ nvkm_subdev_type[tdev->type], tdev->inst);
+ en = _en;
+ } else {
+ en = fifo->engine[engn].engine->subdev.name;
+ }
+
+ nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+ tdev->engine, tdev->runlist, pbid, en);
+
+ fifo->engine[engn].runl = tdev->runlist;
+ fifo->engine[engn].pbid = pbid;
+ fifo->engine_nr = max(fifo->engine_nr, engn + 1);
+ fifo->runlist[tdev->runlist].engm |= BIT(engn);
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
+ if (tdev->type == NVKM_ENGINE_GR)
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
+ fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
+ }
+
+ kfree(map);
+
+ for (i = 0; i < fifo->runlist_nr; i++) {
+ for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ fifo->base.nr * 2/* TSG+chan */ *
+ fifo->func->runlist->size,
+ 0x1000, false,
+ &fifo->runlist[i].mem[j]);
+ if (ret)
+ return ret;
+ }
+
+ init_waitqueue_head(&fifo->runlist[i].wait);
+ INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
+ INIT_LIST_HEAD(&fifo->runlist[i].chan);
+ }
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ fifo->base.nr * 0x200, 0x1000, true,
+ &fifo->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
+}
+
+void
+gk104_fifo_init(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
+
+ /* Enable PBDMAs. */
+ fifo->func->pbdma->init(fifo);
+
+ /* PBDMA[n] */
+ for (i = 0; i < fifo->pbdma_nr; i++) {
+ nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
+
+ /* PBDMA[n].HCE */
+ for (i = 0; i < fifo->pbdma_nr; i++) {
+ nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+ }
+
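+ /* Point HW at the per-channel user area (presumably USERD) that was
+ * mapped into BAR1 during oneinit.
+ */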
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
+
+ if (fifo->func->pbdma->init_timeout)
+ fifo->func->pbdma->init_timeout(fifo);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
+}
+
+void *
+gk104_fifo_dtor(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
+
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
+
+ for (i = 0; i < fifo->runlist_nr; i++) {
+ nvkm_memory_unref(&fifo->runlist[i].mem[1]);
+ nvkm_memory_unref(&fifo->runlist[i].mem[0]);
+ }
+
+ return fifo;
+}
+
+static const struct nvkm_fifo_func
+gk104_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .info = gk104_fifo_info,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .fault = gk104_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = gk104_fifo_recover_chan,
+ .class_get = gk104_fifo_class_get,
+ .class_new = gk104_fifo_class_new,
+};
+
+int
+gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
+{
+ struct gk104_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->func = func;
+ INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
+}
+
+const struct nvkm_enum
+gk104_fifo_fault_access[] = {
+ { 0x0, "READ" },
+ { 0x1, "WRITE" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_engine[] = {
+ { 0x00, "GR", NULL, NVKM_ENGINE_GR },
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
+ { 0x0f, "HOSTSR" },
+ { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
+ { 0x13, "PERF" },
+ { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_reason[] = {
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "BOTH_PTES_VALID" },
+ { 0x0f, "INFO_TYPE_POISONED" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "MSPDEC" },
+ { 0x0c, "MSPPP" },
+ { 0x0d, "MSVLD" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "GR_CE" },
+ { 0x19, "CE2" },
+ { 0x1a, "XV" },
+ { 0x1b, "MMU_NB" },
+ { 0x1c, "MSENC" },
+ { 0x1d, "DFALCON" },
+ { 0x1e, "SKED" },
+ { 0x1f, "AFALCON" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_gpcclient[] = {
+ { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+ { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+ { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+ { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+ { 0x0c, "RAST" },
+ { 0x0d, "GCC" },
+ { 0x0e, "GPCCS" },
+ { 0x0f, "PROP_0" },
+ { 0x10, "PROP_1" },
+ { 0x11, "PROP_2" },
+ { 0x12, "PROP_3" },
+ { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+ { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+ { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+ { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+ { 0x1f, "GPM" },
+ { 0x20, "LTP_UTLB_0" },
+ { 0x21, "LTP_UTLB_1" },
+ { 0x22, "LTP_UTLB_2" },
+ { 0x23, "LTP_UTLB_3" },
+ { 0x24, "GPC_RGG_UTLB" },
+ {}
+};
+
+static const struct gk104_fifo_func
+gk104_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
+ .pbdma = &gk104_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gk104_fifo_runlist,
+ .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
new file mode 100644
index 000000000..f2d12ae73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GK104_FIFO_H__
+#define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
+#include "priv.h"
+struct nvkm_fifo_cgrp;
+
+#include <core/enum.h>
+#include <subdev/mmu.h>
+
+struct gk104_fifo_chan;
+struct gk104_fifo {
+ const struct gk104_fifo_func *func;
+ struct nvkm_fifo base;
+
+ struct {
+ struct work_struct work;
+ u32 engm;
+ u32 runm;
+ } recover;
+
+ int pbdma_nr;
+
+ struct {
+ struct nvkm_engine *engine;
+ int runl;
+ int pbid;
+ } engine[16];
+ int engine_nr;
+
+ struct {
+ struct nvkm_memory *mem[2];
+ int next;
+ wait_queue_head_t wait;
+ struct list_head cgrp;
+ struct list_head chan;
+ u32 engm;
+ u32 engm_sw;
+ } runlist[16];
+ int runlist_nr;
+
+ struct {
+ struct nvkm_memory *mem;
+ struct nvkm_vma *bar;
+ } user;
+};
+
+struct gk104_fifo_func {
+ struct {
+ void (*fault)(struct nvkm_fifo *, int unit);
+ } intr;
+
+ const struct gk104_fifo_pbdma_func {
+ int (*nr)(struct gk104_fifo *);
+ void (*init)(struct gk104_fifo *);
+ void (*init_timeout)(struct gk104_fifo *);
+ } *pbdma;
+
+ struct {
+ const struct nvkm_enum *access;
+ const struct nvkm_enum *engine;
+ const struct nvkm_enum *reason;
+ const struct nvkm_enum *hubclient;
+ const struct nvkm_enum *gpcclient;
+ } fault;
+
+ const struct gk104_fifo_runlist_func {
+ u8 size;
+ void (*cgrp)(struct nvkm_fifo_cgrp *,
+ struct nvkm_memory *, u32 offset);
+ void (*chan)(struct gk104_fifo_chan *,
+ struct nvkm_memory *, u32 offset);
+ void (*commit)(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int entries);
+ } *runlist;
+
+ struct gk104_fifo_user_user {
+ struct nvkm_sclass user;
+ int (*ctor)(const struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+ } user;
+
+ struct gk104_fifo_chan_user {
+ struct nvkm_sclass user;
+ int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *, u32, struct nvkm_object **);
+ } chan;
+ bool cgrp_force;
+};
+
+struct gk104_fifo_engine_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int index, int nr, struct nvkm_fifo **);
+void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
+void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
+void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status);
+void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
+void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
+void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
+void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
+void gk104_fifo_intr_engine(struct gk104_fifo *fifo);
+void *gk104_fifo_dtor(struct nvkm_fifo *base);
+int gk104_fifo_oneinit(struct nvkm_fifo *base);
+int gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data);
+void gk104_fifo_init(struct nvkm_fifo *base);
+void gk104_fifo_fini(struct nvkm_fifo *base);
+int gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject);
+int gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ struct nvkm_oclass *oclass);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *fifo);
+void gk104_fifo_uevent_init(struct nvkm_fifo *fifo);
+
+extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
+int gk104_fifo_pbdma_nr(struct gk104_fifo *);
+void gk104_fifo_pbdma_init(struct gk104_fifo *);
+extern const struct nvkm_enum gk104_fifo_fault_access[];
+extern const struct nvkm_enum gk104_fifo_fault_engine[];
+extern const struct nvkm_enum gk104_fifo_fault_reason[];
+extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
+extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
+extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
+void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
+ struct nvkm_memory *, u32);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int);
+
+extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
+void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+ struct nvkm_memory *, u32);
+
+extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
+void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
+
+void gm107_fifo_intr_fault(struct nvkm_fifo *, int);
+extern const struct nvkm_enum gm107_fifo_fault_engine[];
+extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
+
+extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
+int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+
+void gp100_fifo_intr_fault(struct nvkm_fifo *, int);
+extern const struct nvkm_enum gp100_fifo_fault_engine[];
+
+extern const struct nvkm_enum gv100_fifo_fault_access[];
+extern const struct nvkm_enum gv100_fifo_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_fault_gpcclient[];
+void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+ struct nvkm_memory *, u32);
+void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
+ struct nvkm_memory *, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
new file mode 100644
index 000000000..915278c7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+
+#include <core/memory.h>
+
+#include <nvif/class.h>
+
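+/* GK110 adds channel-group (TSG) headers to the runlist: the entry
+ * carries the group ID, its channel count, and (presumably) a default
+ * timeslice; the group's channels follow as ordinary entries.
+ */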
+void
+gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
+ struct nvkm_memory *memory, u32 offset)
+{
+ nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
+ (3 << 14) | 0x00002000 | cgrp->id);
+ nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+const struct gk104_fifo_runlist_func
+gk110_fifo_runlist = {
+ .size = 8,
+ .cgrp = gk110_fifo_runlist_cgrp,
+ .chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+static const struct gk104_fifo_func
+gk110_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
+ .pbdma = &gk104_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gk110_fifo_runlist,
+ .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
+};
+
+int
+gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk110_fifo, device, type, inst, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
new file mode 100644
index 000000000..cb703693d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+void
+gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
+
+ for (i = 0; i < fifo->pbdma_nr; i++)
+ nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
+}
+
+const struct gk104_fifo_pbdma_func
+gk208_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+gk208_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
+ .pbdma = &gk208_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gk110_fifo_runlist,
+ .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk208_fifo, device, type, inst, 1024, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
new file mode 100644
index 000000000..6e35cf44c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+static const struct gk104_fifo_func
+gk20a_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
+ .pbdma = &gk208_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gk110_fifo_runlist,
+ .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk20a_fifo, device, type, inst, 128, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
new file mode 100644
index 000000000..7af6e687d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fault.h>
+
+#include <nvif/class.h>
+
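+/* GM107 runlist entries carry the channel's instance block address (in
+ * 4KiB units) in word 1, in addition to the channel ID in word 0.
+ */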
+static void
+gm107_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+ struct nvkm_memory *memory, u32 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->base.chid);
+ nvkm_wo32(memory, offset + 4, chan->base.inst->addr >> 12);
+}
+
+const struct gk104_fifo_runlist_func
+gm107_fifo_runlist = {
+ .size = 8,
+ .cgrp = gk110_fifo_runlist_cgrp,
+ .chan = gm107_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+const struct nvkm_enum
+gm107_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
+ { 0x0f, "HOSTSR" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ {}
+};
+
+void
+gm107_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00003f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+static const struct gk104_fifo_func
+gm107_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
+ .pbdma = &gk208_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gm107_fifo_runlist,
+ .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
+};
+
+int
+gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gm107_fifo, device, type, inst, 2048, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
new file mode 100644
index 000000000..573658cb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <nvif/class.h>
+
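+/* GM200 onwards report the PBDMA unit count directly, rather than
+ * deriving it from the enable mask as GK104 does.
+ */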
+int
+gm200_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ return nvkm_rd32(device, 0x002004) & 0x000000ff;
+}
+
+const struct gk104_fifo_pbdma_func
+gm200_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+gm200_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
+ .pbdma = &gm200_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gm107_fifo_runlist,
+ .chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+};
+
+int
+gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gm200_fifo, device, type, inst, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 000000000..556c97e54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+static const struct gk104_fifo_func
+gm20b_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
+ .pbdma = &gm200_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gm107_fifo_runlist,
+ .chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+};
+
+int
+gm20b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gm20b_fifo, device, type, inst, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 000000000..6b46b6b65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <subdev/fault.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x07, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST7", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST8", NULL, NVKM_ENGINE_FIFO },
+ { 0x0f, "HOST9", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "HOST10", NULL, NVKM_ENGINE_FIFO },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x1f, "PHYSICAL" },
+ {}
+};
+
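+/* The fault information layout changed on GP100: hub, access, client and
+ * reason are decoded from different bit positions than on GM107.
+ */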
+void
+gp100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.hub = (type & 0x00100000) >> 20;
+ info.access = (type & 0x00070000) >> 16;
+ info.client = (type & 0x00007f00) >> 8;
+ info.reason = (type & 0x0000001f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
+ .pbdma = &gm200_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gm107_fifo_runlist,
+ .chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp100_fifo, device, type, inst, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
new file mode 100644
index 000000000..7a5929cb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+static const struct gk104_fifo_func
+gp10b_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
+ .pbdma = &gm200_fifo_pbdma,
+ .fault.access = gk104_fifo_fault_access,
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .runlist = &gm107_fifo_runlist,
+ .chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+int
+gp10b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp10b_fifo, device, type, inst, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 000000000..2121f517b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/cl826f.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct g82_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "pushbuf %llx ioffset %016llx "
+ "ilength %08x\n",
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
+ args->v0.ioffset, args->v0.ilength);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ ioffset = args->v0.ioffset;
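+	/* The hardware stores the GPFIFO length as log2 of the number of
+	 * 8-byte entries, merged into the upper ioffset word below.
+	 */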
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+ nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+ .base.oclass = G82_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = g84_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 000000000..4e78bbe3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/cl906f.h>
+#include <nvif/unpack.h>
+
+int
+gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+ struct nvkm_event **pevent)
+{
+ switch (type) {
+ case NV906F_V0_NTFY_NON_STALL_INTERRUPT:
+ *pevent = &chan->fifo->uevent;
+ return 0;
+ case NV906F_V0_NTFY_KILLED:
+ *pevent = &chan->fifo->kevent;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
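+/* Per-engine context pointer offset within the channel's instance block;
+ * engines that return 0 have no context slot to program here.
+ */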
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return 0;
+ case NVKM_ENGINE_GR : return 0x0210;
+ case NVKM_ENGINE_CE : return 0x0230 + (engine->subdev.inst * 0x10);
+ case NVKM_ENGINE_MSPDEC: return 0x0250;
+ case NVKM_ENGINE_MSPPP : return 0x0260;
+ case NVKM_ENGINE_MSVLD : return 0x0270;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static struct gf100_fifo_engn *
+gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ int ret = 0;
+
+ mutex_lock(&chan->fifo->base.mutex);
+ nvkm_wr32(device, 0x002634, chan->base.chid);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ ret = -ETIMEDOUT;
+ }
+ mutex_unlock(&chan->fifo->base.mutex);
+
+ if (ret && suspend)
+ return ret;
+
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ nvkm_done(inst);
+ }
+
+ return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr));
+ nvkm_done(inst);
+ }
+
+ return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
+}
+
+static int
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+ int ret;
+
+ if (!gf100_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
+}
+
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 coff = chan->base.chid * 8;
+
+ if (!list_empty(&chan->head) && !chan->killed) {
+ gf100_fifo_runlist_remove(fifo, chan);
+ nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
+ gf100_fifo_runlist_commit(fifo);
+ }
+
+ gf100_fifo_intr_engine(fifo);
+
+ nvkm_wr32(device, 0x003000 + coff, 0x00000000);
+}
+
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 addr = chan->base.inst->addr >> 12;
+ u32 coff = chan->base.chid * 8;
+
+ nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
+
+ if (list_empty(&chan->head) && !chan->killed) {
+ gf100_fifo_runlist_insert(fifo, chan);
+ nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
+ gf100_fifo_runlist_commit(fifo);
+ }
+}
+
+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+ return gf100_fifo_chan(base);
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+ .dtor = gf100_fifo_gpfifo_dtor,
+ .init = gf100_fifo_gpfifo_init,
+ .fini = gf100_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+ .engine_init = gf100_fifo_gpfifo_engine_init,
+ .engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ union {
+ struct fermi_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_object *parent = oclass->parent;
+ struct gf100_fifo_chan *chan;
+ u64 usermem, ioffset, ilength;
+ int ret = -ENOSYS, i;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength);
+ if (!args->v0.vmm)
+ return -EINVAL;
+ } else
+ return ret;
+
+ /* allocate channel */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+ 0x1000, 0x1000, true, args->v0.vmm, 0,
+ BIT(GF100_FIFO_ENGN_GR) |
+ BIT(GF100_FIFO_ENGN_MSPDEC) |
+ BIT(GF100_FIFO_ENGN_MSPPP) |
+ BIT(GF100_FIFO_ENGN_MSVLD) |
+ BIT(GF100_FIFO_ENGN_CE0) |
+ BIT(GF100_FIFO_ENGN_CE1) |
+ BIT(GF100_FIFO_ENGN_SW),
+ 1, fifo->user.bar->addr, 0x1000,
+ oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+
+ /* clear channel control registers */
+
+ usermem = chan->base.chid * 0x1000;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(fifo->user.mem);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+ nvkm_done(fifo->user.mem);
+ usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+ nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+ nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+ nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->base.inst);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+ .base.oclass = FERMI_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gf100_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 000000000..80456ec70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/cla06f.h>
+#include <nvif/unpack.h>
+
+int
+gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
+{
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_client *client = chan->base.object.client;
+ struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
+ int ret = 0;
+
+ if (cgrp)
+ nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
+ else
+ nvkm_wr32(device, 0x002634, chan->base.chid);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "%s %d [%s] kick timeout\n",
+ cgrp ? "tsg" : "channel",
+ cgrp ? cgrp->id : chan->base.chid, client->name);
+ nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
+ ret = -ETIMEDOUT;
+ }
+ return ret;
+}
+
+int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+ int ret;
+ mutex_lock(&chan->base.fifo->mutex);
+ ret = gk104_fifo_gpfifo_kick_locked(chan);
+ mutex_unlock(&chan->base.fifo->mutex);
+ return ret;
+}
+
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW :
+ case NVKM_ENGINE_CE : return 0;
+ case NVKM_ENGINE_GR : return 0x0210;
+ case NVKM_ENGINE_SEC : return 0x0220;
+ case NVKM_ENGINE_MSPDEC: return 0x0250;
+ case NVKM_ENGINE_MSPPP : return 0x0260;
+ case NVKM_ENGINE_MSVLD : return 0x0270;
+ case NVKM_ENGINE_VIC : return 0x0280;
+ case NVKM_ENGINE_MSENC : return 0x0290;
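+	/* Some engines (NVDEC, the first NVENC) need two context slots; the
+	 * two offsets are packed into the low and high 16 bits and both are
+	 * written by engine_init()/engine_fini().
+	 */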
+ case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVENC :
+ if (engine->subdev.inst)
+ return 0x0210;
+ return 0x02100290;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+struct gk104_fifo_engn *
+gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+ int ret;
+
+ ret = gk104_fifo_gpfifo_kick(chan);
+ if (ret && suspend)
+ return ret;
+
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
+ if ((offset >>= 16)) {
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ }
+ nvkm_done(inst);
+ }
+
+ return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+
+ if (offset) {
+ u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
+ u32 datahi = upper_32_bits(engn->vma->addr);
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
+ if ((offset >>= 16)) {
+ nvkm_wo32(inst, offset + 0x00, datalo);
+ nvkm_wo32(inst, offset + 0x04, datahi);
+ }
+ nvkm_done(inst);
+ }
+
+ return 0;
+}
+
+void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
+}
+
+int
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ int ret;
+
+ if (!gk104_fifo_gpfifo_engine_addr(engine)) {
+ if (engine->subdev.type != NVKM_ENGINE_CE ||
+ engine->subdev.device->card_type < GV100)
+ return 0;
+ }
+
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
+ if (ret)
+ return ret;
+
+ if (!gk104_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
+}
+
+void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 coff = chan->base.chid * 8;
+
+ if (!list_empty(&chan->head)) {
+ gk104_fifo_runlist_remove(fifo, chan);
+ nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
+ gk104_fifo_gpfifo_kick(chan);
+ gk104_fifo_runlist_update(fifo, chan->runl);
+ }
+
+ nvkm_wr32(device, 0x800000 + coff, 0x00000000);
+}
+
+void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 addr = chan->base.inst->addr >> 12;
+ u32 coff = chan->base.chid * 8;
+
+ nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
+ nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
+
+ if (list_empty(&chan->head) && !chan->killed) {
+ gk104_fifo_runlist_insert(fifo, chan);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+ gk104_fifo_runlist_update(fifo, chan->runl);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+ }
+}
+
+void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ kfree(chan->cgrp);
+ return chan;
+}
+
+const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gk104_fifo_gpfifo_engine_init,
+ .engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+static int
+gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct gk104_fifo_chan *chan;
+	int runlist = ffs(*runlists) - 1, ret, i;
+ u64 usermem;
+
+ if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
+ return -EINVAL;
+ *runlists = BIT_ULL(runlist);
+
+ /* Allocate the channel. */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ chan->runl = runlist;
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+ 0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
+ 1, fifo->user.bar->addr, 0x200,
+ oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ *chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
+
+ /* Hack to support GPUs where even individual channels should be
+ * part of a channel group.
+ */
+ if (fifo->func->cgrp_force) {
+ if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
+ return -ENOMEM;
+ chan->cgrp->id = chan->base.chid;
+ INIT_LIST_HEAD(&chan->cgrp->head);
+ INIT_LIST_HEAD(&chan->cgrp->chan);
+ chan->cgrp->chan_nr = 0;
+ }
+
+ /* Clear channel control registers. */
+ usermem = chan->base.chid * 0x200;
+ ilength = order_base_2(ilength / 8);
+
+ nvkm_kmap(fifo->user.mem);
+ for (i = 0; i < 0x200; i += 4)
+ nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+ nvkm_done(fifo->user.mem);
+ usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
+ nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+ nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->base.inst);
+ return 0;
+}
+
+int
+gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct kepler_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x "
+ "runlist %016llx priv %d\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ return gk104_fifo_gpfifo_new_(fifo,
+ &args->v0.runlist,
+ &args->v0.chid,
+ args->v0.vmm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ oclass, pobject);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
new file mode 100644
index 000000000..428f9b411
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
+static u32
+gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
+{
+ return chan->chid;
+}
+
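+/* Volta tracks CE/GR context validity with enable bits at 0x0ac of the
+ * instance block; the channel is preempted before the bits are flipped.
+ */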
+static int
+gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
+{
+ struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 mask = ce ? 0x00020000 : 0x00010000;
+ const u32 data = valid ? mask : 0x00000000;
+ int ret;
+
+ /* Block runlist to prevent the channel from being rescheduled. */
+ mutex_lock(&chan->fifo->base.mutex);
+ nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
+
+ /* Preempt the channel. */
+ ret = gk104_fifo_gpfifo_kick_locked(chan);
+ if (ret == 0) {
+ /* Update engine context validity. */
+ nvkm_kmap(chan->base.inst);
+ nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
+ nvkm_done(chan->base.inst);
+ }
+
+ /* Resume runlist. */
+ nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
+ mutex_unlock(&chan->fifo->base.mutex);
+ return ret;
+}
+
+int
+gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ int ret;
+
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_kmap(inst);
+ nvkm_wo32(chan->base.inst, 0x220, 0x00000000);
+ nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+ nvkm_done(inst);
+ return ret;
+ }
+
+ ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x0210, 0x00000000);
+ nvkm_wo32(inst, 0x0214, 0x00000000);
+ nvkm_done(inst);
+ return ret;
+}
+
+int
+gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
+
+ nvkm_kmap(inst);
+ nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(bar2));
+ nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(bar2));
+ nvkm_done(inst);
+
+ return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+ }
+
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
+ nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
+ nvkm_done(inst);
+
+ return gv100_fifo_gpfifo_engine_valid(chan, false, true);
+}
+
+static const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gv100_fifo_gpfifo_engine_init,
+ .engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = gv100_fifo_gpfifo_submit_token,
+};
+
+int
+gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
+ struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+ u32 *token, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct gk104_fifo_chan *chan;
+	int runlist = ffs(*runlists) - 1, ret, i;
+ u64 usermem;
+
+ if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
+ return -EINVAL;
+ *runlists = BIT_ULL(runlist);
+
+ /* Allocate the channel. */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ chan->runl = runlist;
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
+ 0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200,
+ oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ *chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
+ *token = chan->base.func->submit_token(&chan->base);
+
+ /* Hack to support GPUs where even individual channels should be
+ * part of a channel group.
+ */
+ if (fifo->func->cgrp_force) {
+ if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
+ return -ENOMEM;
+ chan->cgrp->id = chan->base.chid;
+ INIT_LIST_HEAD(&chan->cgrp->head);
+ INIT_LIST_HEAD(&chan->cgrp->chan);
+ chan->cgrp->chan_nr = 0;
+ }
+
+ /* Clear channel control registers. */
+ usermem = chan->base.chid * 0x200;
+ ilength = order_base_2(ilength / 8);
+
+ nvkm_kmap(fifo->user.mem);
+ for (i = 0; i < 0x200; i += 4)
+ nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+ nvkm_done(fifo->user.mem);
+ usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x00c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->base.inst, 0x048, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x04c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
+ nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
+ nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
+ nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
+ nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_done(chan->base.inst);
+ return 0;
+}
+
+int
+gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct volta_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x "
+ "runlist %016llx priv %d\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
+ &args->v0.runlist,
+ &args->v0.chid,
+ args->v0.vmm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
+ oclass, pobject);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 000000000..d8f28ec1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/cl506f.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "pushbuf %llx ioffset %016llx "
+ "ilength %08x\n",
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
+ args->v0.ioffset, args->v0.ilength);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+ .base.oclass = NV50_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
new file mode 100644
index 000000000..99aafa103
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
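+/* Turing doorbell tokens carry the runlist index in the upper 16 bits in
+ * addition to the channel ID.
+ */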
+static u32
+tu102_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ return (chan->runl << 16) | chan->base.chid;
+}
+
+static const struct nvkm_fifo_chan_func
+tu102_fifo_gpfifo = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gv100_fifo_gpfifo_engine_init,
+ .engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = tu102_fifo_gpfifo_submit_token,
+};
+
+int
+tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct volta_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x "
+ "runlist %016llx priv %d\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
+ &args->v0.runlist,
+ &args->v0.chid,
+ args->v0.vmm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
+ oclass, pobject);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
new file mode 100644
index 000000000..faf0fe9f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
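+/* Volta runlist entries are 16 bytes: a channel entry points at both the
+ * USERD page and the instance block, and a TSG header precedes its channels.
+ */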
+void
+gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+ struct nvkm_memory *memory, u32 offset)
+{
+ struct nvkm_memory *usermem = chan->fifo->user.mem;
+ const u64 user = nvkm_memory_addr(usermem) + (chan->base.chid * 0x200);
+ const u64 inst = chan->base.inst->addr;
+
+ nvkm_wo32(memory, offset + 0x0, lower_32_bits(user));
+ nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
+ nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->base.chid);
+ nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
+}
+
+void
+gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
+ struct nvkm_memory *memory, u32 offset)
+{
+ nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
+ nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
+ nvkm_wo32(memory, offset + 0x8, cgrp->id);
+ nvkm_wo32(memory, offset + 0xc, 0x00000000);
+}
+
+static const struct gk104_fifo_runlist_func
+gv100_fifo_runlist = {
+ .size = 16,
+ .cgrp = gv100_fifo_runlist_cgrp,
+ .chan = gv100_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+const struct nvkm_enum
+gv100_fifo_fault_gpcclient[] = {
+ { 0x00, "T1_0" },
+ { 0x01, "T1_1" },
+ { 0x02, "T1_2" },
+ { 0x03, "T1_3" },
+ { 0x04, "T1_4" },
+ { 0x05, "T1_5" },
+ { 0x06, "T1_6" },
+ { 0x07, "T1_7" },
+ { 0x08, "PE_0" },
+ { 0x09, "PE_1" },
+ { 0x0a, "PE_2" },
+ { 0x0b, "PE_3" },
+ { 0x0c, "PE_4" },
+ { 0x0d, "PE_5" },
+ { 0x0e, "PE_6" },
+ { 0x0f, "PE_7" },
+ { 0x10, "RAST" },
+ { 0x11, "GCC" },
+ { 0x12, "GPCCS" },
+ { 0x13, "PROP_0" },
+ { 0x14, "PROP_1" },
+ { 0x15, "PROP_2" },
+ { 0x16, "PROP_3" },
+ { 0x17, "GPM" },
+ { 0x18, "LTP_UTLB_0" },
+ { 0x19, "LTP_UTLB_1" },
+ { 0x1a, "LTP_UTLB_2" },
+ { 0x1b, "LTP_UTLB_3" },
+ { 0x1c, "LTP_UTLB_4" },
+ { 0x1d, "LTP_UTLB_5" },
+ { 0x1e, "LTP_UTLB_6" },
+ { 0x1f, "LTP_UTLB_7" },
+ { 0x20, "RGG_UTLB" },
+ { 0x21, "T1_8" },
+ { 0x22, "T1_9" },
+ { 0x23, "T1_10" },
+ { 0x24, "T1_11" },
+ { 0x25, "T1_12" },
+ { 0x26, "T1_13" },
+ { 0x27, "T1_14" },
+ { 0x28, "T1_15" },
+ { 0x29, "TPCCS_0" },
+ { 0x2a, "TPCCS_1" },
+ { 0x2b, "TPCCS_2" },
+ { 0x2c, "TPCCS_3" },
+ { 0x2d, "TPCCS_4" },
+ { 0x2e, "TPCCS_5" },
+ { 0x2f, "TPCCS_6" },
+ { 0x30, "TPCCS_7" },
+ { 0x31, "PE_8" },
+ { 0x32, "PE_9" },
+ { 0x33, "TPCCS_8" },
+ { 0x34, "TPCCS_9" },
+ { 0x35, "T1_16" },
+ { 0x36, "T1_17" },
+ { 0x37, "T1_18" },
+ { 0x38, "T1_19" },
+ { 0x39, "PE_10" },
+ { 0x3a, "PE_11" },
+ { 0x3b, "TPCCS_10" },
+ { 0x3c, "TPCCS_11" },
+ { 0x3d, "T1_20" },
+ { 0x3e, "T1_21" },
+ { 0x3f, "T1_22" },
+ { 0x40, "T1_23" },
+ { 0x41, "PE_12" },
+ { 0x42, "PE_13" },
+ { 0x43, "TPCCS_12" },
+ { 0x44, "TPCCS_13" },
+ { 0x45, "T1_24" },
+ { 0x46, "T1_25" },
+ { 0x47, "T1_26" },
+ { 0x48, "T1_27" },
+ { 0x49, "PE_14" },
+ { 0x4a, "PE_15" },
+ { 0x4b, "TPCCS_14" },
+ { 0x4c, "TPCCS_15" },
+ { 0x4d, "T1_28" },
+ { 0x4e, "T1_29" },
+ { 0x4f, "T1_30" },
+ { 0x50, "T1_31" },
+ { 0x51, "PE_16" },
+ { 0x52, "PE_17" },
+ { 0x53, "TPCCS_16" },
+ { 0x54, "TPCCS_17" },
+ { 0x55, "T1_32" },
+ { 0x56, "T1_33" },
+ { 0x57, "T1_34" },
+ { 0x58, "T1_35" },
+ { 0x59, "PE_18" },
+ { 0x5a, "PE_19" },
+ { 0x5b, "TPCCS_18" },
+ { 0x5c, "TPCCS_19" },
+ { 0x5d, "T1_36" },
+ { 0x5e, "T1_37" },
+ { 0x5f, "T1_38" },
+ { 0x60, "T1_39" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "NVDEC" },
+ { 0x0d, "NVENC1" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "CE2" },
+ { 0x19, "XV" },
+ { 0x1a, "MMU_NB" },
+ { 0x1b, "NVENC0" },
+ { 0x1c, "DFALCON" },
+ { 0x1d, "SKED" },
+ { 0x1e, "AFALCON" },
+ { 0x1f, "DONT_CARE" },
+ { 0x20, "HSCE0" },
+ { 0x21, "HSCE1" },
+ { 0x22, "HSCE2" },
+ { 0x23, "HSCE3" },
+ { 0x24, "HSCE4" },
+ { 0x25, "HSCE5" },
+ { 0x26, "HSCE6" },
+ { 0x27, "HSCE7" },
+ { 0x28, "HSCE8" },
+ { 0x29, "HSCE9" },
+ { 0x2a, "HSHUB" },
+ { 0x2b, "PTP_X0" },
+ { 0x2c, "PTP_X1" },
+ { 0x2d, "PTP_X2" },
+ { 0x2e, "PTP_X3" },
+ { 0x2f, "PTP_X4" },
+ { 0x30, "PTP_X5" },
+ { 0x31, "PTP_X6" },
+ { 0x32, "PTP_X7" },
+ { 0x33, "NVENC2" },
+ { 0x34, "VPR_SCRUBBER0" },
+ { 0x35, "VPR_SCRUBBER1" },
+ { 0x36, "DWBIF" },
+ { 0x37, "FBFALCON" },
+ { 0x38, "CE_SHIM" },
+ { 0x39, "GSP" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_fault_reason[] = {
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "POISONED" },
+ { 0x0f, "ATOMIC_VIOLATION" },
+ {}
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_fault_access[] = {
+ { 0x0, "VIRT_READ" },
+ { 0x1, "VIRT_WRITE" },
+ { 0x2, "VIRT_ATOMIC" },
+ { 0x3, "VIRT_PREFETCH" },
+ { 0x4, "VIRT_ATOMIC_WEAK" },
+ { 0x8, "PHYS_READ" },
+ { 0x9, "PHYS_WRITE" },
+ { 0xa, "PHYS_ATOMIC" },
+ { 0xb, "PHYS_PREFETCH" },
+ {}
+};
+
+static const struct gk104_fifo_func
+gv100_fifo = {
+ .pbdma = &gm200_fifo_pbdma,
+ .fault.access = gv100_fifo_fault_access,
+ .fault.engine = gv100_fifo_fault_engine,
+ .fault.reason = gv100_fifo_fault_reason,
+ .fault.hubclient = gv100_fifo_fault_hubclient,
+ .fault.gpcclient = gv100_fifo_fault_gpcclient,
+ .runlist = &gv100_fifo_runlist,
+ .user = {{-1,-1,VOLTA_USERMODE_A }, gv100_fifo_user_new },
+ .chan = {{ 0, 0,VOLTA_CHANNEL_GPFIFO_A}, gv100_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+int
+gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gv100_fifo, device, type, inst, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
new file mode 100644
index 000000000..c6730c124
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
+
+static const struct nv04_fifo_ramfc
+nv04_fifo_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+void
+nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
+__acquires(fifo->base.lock)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ *pflags = flags;
+
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+ /* in some cases the puller may be left in an inconsistent state
+ * if you try to stop it while it's busy translating handles.
+ * sometimes you get a CACHE_ERROR, sometimes it just fails
+	 * silently, sending incorrect instance offsets to PGRAPH after
+ * it's started up again.
+ *
+ * to avoid this, we invalidate the most recently calculated
+ * instance.
+ */
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+ if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+ break;
+ );
+
+ if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
+__releases(fifo->base.lock)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags = *pflags;
+
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+struct nvkm_engine *
+nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case NV04_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case NV04_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break;
+ case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+int
+nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return NV04_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return NV04_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG : return NV04_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
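+/* Attempt to execute a software method: when the subchannel's engine is
+ * bound to SW (or an object handle is being bound), hand the method off to
+ * the software engine rather than reporting CACHE_ERROR.
+ */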
+static bool
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
+{
+ struct nvkm_sw *sw = device->sw;
+ const int subc = (addr & 0x0000e000) >> 13;
+ const int mthd = (addr & 0x00001ffc);
+ const u32 mask = 0x0000000f << (subc * 4);
+ u32 engine = nvkm_rd32(device, 0x003280);
+ bool handled = false;
+
+ switch (mthd) {
+ case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+ nvkm_wr32(device, 0x003280, (engine &= ~mask));
+ fallthrough;
+ case 0x0180 ... 0x01fc: /* handle -> instance */
+ data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+ fallthrough;
+ case 0x0100 ... 0x017c:
+ case 0x0200 ... 0x1ffc: /* pass method down to sw */
+ if (!(engine & mask) && sw)
+ handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
+ break;
+ default:
+ break;
+ }
+
+ return handled;
+}
+
+static void
+nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ u32 pull0 = nvkm_rd32(device, 0x003250);
+ u32 mthd, data;
+ int ptr;
+
+	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
+	 * G80 chips, but CACHE1 isn't big enough for this much data. Tests
+	 * show that it wraps around to the start at GET=0x800. No clue as to
+	 * why.
+	 */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!(pull0 & 0x00000100) ||
+ !nv04_fifo_swmthd(device, chid, mthd, data)) {
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "CACHE_ERROR - "
+ "ch %d [%s] subc %d mthd %04x data %08x\n",
+ chid, chan ? chan->object.client->name : "unknown",
+ (mthd >> 13) & 7, mthd & 0x1ffc, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+ }
+
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+static void
+nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 dma_get = nvkm_rd32(device, 0x003244);
+ u32 dma_put = nvkm_rd32(device, 0x003240);
+ u32 push = nvkm_rd32(device, 0x003220);
+ u32 state = nvkm_rd32(device, 0x003228);
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *name;
+
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ name = chan ? chan->object.client->name : "unknown";
+ if (device->card_type == NV_50) {
+ u32 ho_get = nvkm_rd32(device, 0x003328);
+ u32 ho_put = nvkm_rd32(device, 0x003320);
+ u32 ib_get = nvkm_rd32(device, 0x003334);
+ u32 ib_put = nvkm_rd32(device, 0x003330);
+
+ nvkm_error(subdev, "DMA_PUSHER - "
+ "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+ "ib_put %08x state %08x (err: %s) push %08x\n",
+ chid, name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state),
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nvkm_wr32(device, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nvkm_wr32(device, 0x003244, dma_put);
+ nvkm_wr32(device, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put)
+ nvkm_wr32(device, 0x003334, ib_put);
+ } else {
+ nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+ "state %08x (err: %s) push %08x\n",
+ chid, name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nvkm_wr32(device, 0x003244, dma_put);
+ }
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+
+ nvkm_wr32(device, 0x003228, 0x00000000);
+ nvkm_wr32(device, 0x003220, 0x00000001);
+ nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
+
+void
+nv04_fifo_intr(struct nvkm_fifo *base)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+ u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
+ u32 reassign, chid, get, sem;
+
+ reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
+ get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
+
+ if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
+ nv04_fifo_cache_error(fifo, chid, get);
+ stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
+ nv04_fifo_dma_pusher(fifo, chid);
+ stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (stat & NV_PFIFO_INTR_SEMAPHORE) {
+ stat &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (device->card_type == NV_50) {
+ if (stat & 0x00000010) {
+ stat &= ~0x00000010;
+ nvkm_wr32(device, 0x002100, 0x00000010);
+ }
+
+ if (stat & 0x40000000) {
+ nvkm_wr32(device, 0x002100, 0x40000000);
+ nvkm_fifo_uevent(&fifo->base);
+ stat &= ~0x40000000;
+ }
+ }
+
+ if (stat) {
+ nvkm_warn(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
+ }
+
+ nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
+}
+
+void
+nv04_fifo_init(struct nvkm_fifo *base)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+int
+nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
+ struct nvkm_fifo **pfifo)
+{
+ struct nv04_fifo *fifo;
+ int ret;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->ramfc = ramfc;
+ *pfifo = &fifo->base;
+
+ ret = nvkm_fifo_ctor(func, device, type, inst, nr, &fifo->base);
+ if (ret)
+ return ret;
+
+ set_bit(nr - 1, fifo->base.mask); /* inactive channel */
+ return 0;
+}
+
+static const struct nvkm_fifo_func
+nv04_fifo = {
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv04_fifo_dma_oclass,
+ NULL
+ },
+};
+
+int
+nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
new file mode 100644
index 000000000..3f23bcde4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
+#include "priv.h"
+
+struct nv04_fifo_ramfc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+};
+
+struct nv04_fifo {
+ struct nvkm_fifo base;
+ const struct nv04_fifo_ramfc *ramfc;
+};
+
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, const struct nv04_fifo_ramfc *, struct nvkm_fifo **);
+void nv04_fifo_init(struct nvkm_fifo *);
+#endif
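Each nv04_fifo_ramfc entry in the tables that follow (nv10.c, nv17.c, nv40.c) describes one PFIFO register field saved to and restored from a channel's RAMFC slot: a field `bits` wide, held at bit `ctxs` of the 32-bit RAMFC word at byte offset `ctxp`, and at bit `regs` of register `regp`. A minimal sketch of the restore direction, assuming a hypothetical helper name (the real consumer is the nv04 channel code, which is not part of this diff):

/* Illustrative only: shows how a ramfc[] table would be walked; the
 * helper name and call site are hypothetical, not nouveau's code. */
static void
example_ramfc_load(struct nvkm_device *device, struct nvkm_memory *ramfc,
		   u32 base, const struct nv04_fifo_ramfc *c)
{
	nvkm_kmap(ramfc);
	for (; c->bits; c++) {
		u32 mask = (1ULL << c->bits) - 1;
		u32 data = (nvkm_ro32(ramfc, base + c->ctxp) >> c->ctxs) & mask;

		nvkm_mask(device, c->regp, mask << c->regs, data << c->regs);
	}
	nvkm_done(ramfc);
}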
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
new file mode 100644
index 000000000..f8887f0f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
+
+static const struct nv04_fifo_ramfc
+nv10_fifo_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+static const struct nvkm_fifo_func
+nv10_fifo = {
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv10_fifo_dma_oclass,
+ NULL
+ },
+};
+
+int
+nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
new file mode 100644
index 000000000..3f94c7b5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+static const struct nv04_fifo_ramfc
+nv17_fifo_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+static void
+nv17_fifo_init(struct nvkm_fifo *base)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+ 0x00010000);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+static const struct nvkm_fifo_func
+nv17_fifo = {
+ .init = nv17_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv17_fifo_dma_oclass,
+ NULL
+ },
+};
+
+int
+nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
new file mode 100644
index 000000000..f9ea46809
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+
+static const struct nv04_fifo_ramfc
+nv40_fifo_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+static void
+nv40_fifo_init(struct nvkm_fifo *base)
+{
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, 0x002040, 0x000000ff);
+ nvkm_wr32(device, 0x002044, 0x2101ffff);
+ nvkm_wr32(device, 0x002058, 0x00000001);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+
+ switch (device->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nvkm_wr32(device, 0x002230, 0x00000001);
+ fallthrough;
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x48:
+ nvkm_wr32(device, 0x002220, 0x00030002);
+ break;
+ default:
+ nvkm_wr32(device, 0x002230, 0x00000000);
+ nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+ nvkm_memory_addr(ramfc)) >> 16) |
+ 0x00030000);
+ break;
+ }
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+static const struct nvkm_fifo_func
+nv40_fifo = {
+ .init = nv40_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv40_fifo_dma_oclass,
+ NULL
+ },
+};
+
+int
+nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo);
+}
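In nv40_fifo_init() above, chipsets without the fixed RAMFC window place RAMFC relative to the end of VRAM. A worked example of the value written to 0x002220 in the default branch, using hypothetical numbers (256 MiB of VRAM, RAMFC at offset 0x20000):

/* Hypothetical figures, purely to illustrate the default branch. */
u64 ram_size   = 0x10000000;                      /* 256 MiB */
u64 ramfc_addr = 0x20000;
u32 val = ((ram_size - 512 * 1024 + ramfc_addr) >> 16) | 0x00030000;
/* val == 0x00030ffa */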
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
new file mode 100644
index 000000000..a08742cf4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "channv50.h"
+
+#include <core/gpuobj.h>
+
+static void
+nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_memory *cur;
+ int i, p;
+
+ cur = fifo->runlist[fifo->cur_runlist];
+ fifo->cur_runlist = !fifo->cur_runlist;
+
+ nvkm_kmap(cur);
+ for (i = 0, p = 0; i < fifo->base.nr; i++) {
+ if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
+ nvkm_wo32(cur, p++ * 4, i);
+ }
+ nvkm_done(cur);
+
+ nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
+ nvkm_wr32(device, 0x0032ec, p);
+ nvkm_wr32(device, 0x002500, 0x00000101);
+}
+
+void
+nv50_fifo_runlist_update(struct nv50_fifo *fifo)
+{
+ mutex_lock(&fifo->base.mutex);
+ nv50_fifo_runlist_update_locked(fifo);
+ mutex_unlock(&fifo->base.mutex);
+}
+
+int
+nv50_fifo_oneinit(struct nvkm_fifo *base)
+{
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+ false, &fifo->runlist[0]);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+ false, &fifo->runlist[1]);
+}
+
+void
+nv50_fifo_init(struct nvkm_fifo *base)
+{
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
+
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+ nvkm_wr32(device, 0x002044, 0x01003fff);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0xbfffffff);
+
+ for (i = 0; i < 128; i++)
+ nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+ nv50_fifo_runlist_update_locked(fifo);
+
+ nvkm_wr32(device, 0x003200, 0x00000001);
+ nvkm_wr32(device, 0x003250, 0x00000001);
+ nvkm_wr32(device, 0x002500, 0x00000001);
+}
+
+void *
+nv50_fifo_dtor(struct nvkm_fifo *base)
+{
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ nvkm_memory_unref(&fifo->runlist[1]);
+ nvkm_memory_unref(&fifo->runlist[0]);
+ return fifo;
+}
+
+int
+nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+ struct nv50_fifo *fifo;
+ int ret;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ *pfifo = &fifo->base;
+
+ ret = nvkm_fifo_ctor(func, device, type, inst, 128, &fifo->base);
+ if (ret)
+ return ret;
+
+ set_bit(0, fifo->base.mask); /* PIO channel */
+ set_bit(127, fifo->base.mask); /* inactive channel */
+ return 0;
+}
+
+static const struct nvkm_fifo_func
+nv50_fifo = {
+ .dtor = nv50_fifo_dtor,
+ .oneinit = nv50_fifo_oneinit,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv50_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
+}
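The runlist above is double-buffered: each rebuild fills the buffer the hardware is not currently using, flips cur_runlist, and points 0x0032f4/0x0032ec at the new copy before triggering 0x002500. Inclusion is decided by bit 31 of a channel's 0x002600 context-table entry, which nv50_fifo_runlist_update_locked() tests. Enabling a channel and rescheduling therefore looks roughly like the sketch below (illustrative only; the real channel code lives in channv50.c, outside this hunk):

/* Illustrative: flag chid as runnable, then rebuild the runlist so
 * nv50_fifo_runlist_update_locked() picks it up. */
static void
example_chan_schedule(struct nv50_fifo *fifo, int chid)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x80000000);
	nv50_fifo_runlist_update(fifo);
}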
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
new file mode 100644
index 000000000..0111e7e5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
+#include "priv.h"
+
+struct nv50_fifo {
+ struct nvkm_fifo base;
+ struct nvkm_memory *runlist[2];
+ int cur_runlist;
+};
+
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fifo **);
+
+void *nv50_fifo_dtor(struct nvkm_fifo *);
+int nv50_fifo_oneinit(struct nvkm_fifo *);
+void nv50_fifo_init(struct nvkm_fifo *);
+void nv50_fifo_runlist_update(struct nv50_fifo *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 000000000..79cec5764
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, struct nvkm_fifo *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
+void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *, u64 inst);
+
+struct nvkm_fifo_chan_oclass;
+struct nvkm_fifo_func {
+ void *(*dtor)(struct nvkm_fifo *);
+ int (*oneinit)(struct nvkm_fifo *);
+ int (*info)(struct nvkm_fifo *, u64 mthd, u64 *data);
+ void (*init)(struct nvkm_fifo *);
+ void (*fini)(struct nvkm_fifo *);
+ void (*intr)(struct nvkm_fifo *);
+ void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
+ int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *);
+ struct nvkm_engine *(*id_engine)(struct nvkm_fifo *, int engi);
+ void (*pause)(struct nvkm_fifo *, unsigned long *);
+ void (*start)(struct nvkm_fifo *, unsigned long *);
+ void (*uevent_init)(struct nvkm_fifo *);
+ void (*uevent_fini)(struct nvkm_fifo *);
+ void (*recover_chan)(struct nvkm_fifo *, int chid);
+ int (*class_get)(struct nvkm_fifo *, int index, struct nvkm_oclass *);
+ int (*class_new)(struct nvkm_fifo *, const struct nvkm_oclass *,
+ void *, u32, struct nvkm_object **);
+ const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+void nv04_fifo_intr(struct nvkm_fifo *);
+int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *nv04_fifo_id_engine(struct nvkm_fifo *, int);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
+
+int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *gk104_fifo_id_engine(struct nvkm_fifo *, int);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 000000000..4445a12b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+#endif
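A CACHE1_DMA_FETCH value is composed from one fetch-trigger field, one fetch-size field and one maximum-outstanding-requests field, optionally OR'd with the endianness flag. The combination below is shown only as an example of how the defines above fit together; the value actually programmed comes from the channel code, which is outside this diff:

u32 dma_fetch = NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8;
#ifdef __BIG_ENDIAN
dma_fetch |= NV_PFIFO_CACHE1_BIG_ENDIAN;
#endif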
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
new file mode 100644
index 000000000..260b197f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/top.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
+
+#include <nvif/class.h>
+
+static void
+tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = nvkm_memory_addr(mem);
+ /*XXX: target? */
+
+ nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
+ nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
+ nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
+
+ /*XXX: how to wait? can you even wait? */
+}
+
+static const struct gk104_fifo_runlist_func
+tu102_fifo_runlist = {
+ .size = 16,
+ .cgrp = gv100_fifo_runlist_cgrp,
+ .chan = gv100_fifo_runlist_chan,
+ .commit = tu102_fifo_runlist_commit,
+};
+
+static const struct nvkm_enum
+tu102_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ { 0x2e, "HOST14" },
+ { 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ {}
+};
+
+static void
+tu102_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ const u32 mask = (1 << fifo->pbdma_nr) - 1;
+ /*XXX: this is a bit of a guess at this point in time. */
+ nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
+}
+
+static const struct gk104_fifo_pbdma_func
+tu102_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = tu102_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+tu102_fifo = {
+ .pbdma = &tu102_fifo_pbdma,
+ .fault.access = gv100_fifo_fault_access,
+ .fault.engine = tu102_fifo_fault_engine,
+ .fault.reason = gv100_fifo_fault_reason,
+ .fault.hubclient = gv100_fifo_fault_hubclient,
+ .fault.gpcclient = gv100_fifo_fault_gpcclient,
+ .runlist = &tu102_fifo_runlist,
+ .user = {{-1,-1,VOLTA_USERMODE_A }, tu102_fifo_user_new },
+ .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu102_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+static void
+tu102_fifo_recover_work(struct work_struct *w)
+{
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, runm, todo;
+ int engn, runl;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_update(fifo, runl);
+
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+tu102_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runm = BIT(runl);
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
+
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+tu102_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+ struct gk104_fifo_chan *chan;
+ struct nvkm_fifo_cgrp *cgrp;
+
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ return chan;
+ }
+ }
+
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ if (cgrp->id == chid) {
+ chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+ list_del_init(&chan->head);
+ if (!--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ return chan;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+tu102_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Lookup SW state for channel, and mark it as dead. */
+ chan = tu102_fifo_recover_chid(fifo, runl, chid);
+ if (chan) {
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
+ }
+
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ tu102_fifo_recover_engn(fifo, engn);
+ }
+}
+
+static void
+tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+		/* The channel is no longer viable, kill it. */
+ tu102_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Preempt the runlist */
+ nvkm_wr32(device, 0x2638, BIT(runl));
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+ schedule_work(&fifo->recover.work);
+}
+
+static void
+tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *en = "";
+ char ct[8] = "HUB/";
+ int engn;
+
+ er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, ee->data2, 0);
+ break;
+ }
+ }
+
+ if (ee == NULL) {
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ en = engine->subdev.name;
+ }
+ } else {
+ en = ee->name;
+ }
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : en,
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ tu102_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ tu102_fifo_recover_engn(fifo, engn);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags, engm;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+
+ engm = nvkm_rd32(device, 0x2a30);
+ nvkm_wr32(device, 0x2a30, engm);
+
+ for_each_set_bit(engn, &engm, 32)
+ tu102_fifo_recover_engn(fifo, engn);
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+
+ nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
+static void
+tu102_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ tu102_fifo_intr_ctxsw_timeout(fifo);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000100) {
+ tu102_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+
+ while (mask) {
+ u32 unit = __ffs(mask);
+
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .info = gk104_fifo_info,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = tu102_fifo_intr,
+ .fault = tu102_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = tu102_fifo_recover_chan,
+ .class_get = gk104_fifo_class_get,
+ .class_new = gk104_fifo_class_new,
+};
+
+int
+tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ struct gk104_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->func = &tu102_fifo;
+ INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&tu102_fifo_, device, type, inst, 4096, &fifo->base);
+}
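tu102_fifo_recover_work() walks its pending-engine and pending-runlist masks with the __ffs()/clear-lowest-bit idiom, while tu102_fifo_recover_chan() and tu102_fifo_intr_ctxsw_timeout() use for_each_set_bit() for the same walk. The two forms are equivalent; a minimal, purely illustrative sketch:

#include <linux/bitops.h>
#include <linux/printk.h>

static void
example_walk_engines(unsigned long engm)
{
	unsigned long todo;
	int engn;

	/* Style used by tu102_fifo_recover_work(). */
	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn))
		pr_debug("engine %d pending\n", engn);

	/* Equivalent walk, as in tu102_fifo_recover_chan(). */
	for_each_set_bit(engn, &engm, BITS_PER_LONG)
		pr_debug("engine %d pending\n", engn);
}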
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
new file mode 100644
index 000000000..54a3a3092
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_FIFO_USER_H__
+#define __NVKM_FIFO_USER_H__
+#include "priv.h"
+int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+int tu102_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
new file mode 100644
index 000000000..3dc3b8b31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+gv100_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ *addr = 0x810000 + device->func->resource_addr(device, 0);
+ *size = 0x010000;
+ *type = NVKM_OBJECT_MAP_IO;
+ return 0;
+}
+
+static const struct nvkm_object_func
+gv100_fifo_user = {
+ .map = gv100_fifo_user_map,
+};
+
+int
+gv100_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ return nvkm_object_new_(&gv100_fifo_user, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
new file mode 100644
index 000000000..217268f8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+tu102_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ *addr = 0xbb0000 + device->func->resource_addr(device, 0);
+ *size = 0x010000;
+ *type = NVKM_OBJECT_MAP_IO;
+ return 0;
+}
+
+static const struct nvkm_object_func
+tu102_fifo_user = {
+ .map = tu102_fifo_user_map,
+};
+
+int
+tu102_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ return nvkm_object_new_(&tu102_fifo_user, oclass, argv, argc, pobject);
+}