Diffstat (limited to 'drivers/gpu/drm/nouveau/dispnv50')
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/Kbuild | 61
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/atom.h | 267
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base507c.c | 341
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base827c.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base907c.c | 216
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base917c.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core.h | 73
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core507d.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core827d.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core907d.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core917d.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/corec37d.c | 179
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/corec57d.c | 80
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crc.c | 745
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crc.h | 131
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crc907d.c | 142
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crcc37d.c | 127
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crcc37d.h | 40
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crcc57d.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 205
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs907a.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/cursc37a.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/dac507d.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/dac907d.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 2790
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h | 117
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/handles.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 637
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.h | 100
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head507d.c | 449
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head827d.c | 168
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head907d.c | 433
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head917d.c | 138
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/headc37d.c | 300
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/headc57d.c | 253
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/lut.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/lut.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/oimm.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/oimm.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/oimm507b.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly507e.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly827e.c | 120
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly907e.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/ovly917e.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/pior507d.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/sor507d.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/sor907d.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/sorc37d.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimm.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimm.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 798
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.h | 139
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c | 386
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c | 243
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c | 106
63 files changed, 11760 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
new file mode 100644
index 0000000000..28be2912ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: MIT
+nouveau-y += dispnv50/disp.o
+nouveau-y += dispnv50/lut.o
+
+nouveau-y += dispnv50/core.o
+nouveau-y += dispnv50/core507d.o
+nouveau-y += dispnv50/core827d.o
+nouveau-y += dispnv50/core907d.o
+nouveau-y += dispnv50/core917d.o
+nouveau-y += dispnv50/corec37d.o
+nouveau-y += dispnv50/corec57d.o
+
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
+
+nouveau-y += dispnv50/dac507d.o
+nouveau-y += dispnv50/dac907d.o
+
+nouveau-y += dispnv50/pior507d.o
+
+nouveau-y += dispnv50/sor507d.o
+nouveau-y += dispnv50/sor907d.o
+nouveau-y += dispnv50/sorc37d.o
+
+nouveau-y += dispnv50/head.o
+nouveau-y += dispnv50/head507d.o
+nouveau-y += dispnv50/head827d.o
+nouveau-y += dispnv50/head907d.o
+nouveau-y += dispnv50/head917d.o
+nouveau-y += dispnv50/headc37d.o
+nouveau-y += dispnv50/headc57d.o
+
+nouveau-y += dispnv50/wimm.o
+nouveau-y += dispnv50/wimmc37b.o
+
+nouveau-y += dispnv50/wndw.o
+nouveau-y += dispnv50/wndwc37e.o
+nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
+
+nouveau-y += dispnv50/base.o
+nouveau-y += dispnv50/base507c.o
+nouveau-y += dispnv50/base827c.o
+nouveau-y += dispnv50/base907c.o
+nouveau-y += dispnv50/base917c.o
+
+nouveau-y += dispnv50/curs.o
+nouveau-y += dispnv50/curs507a.o
+nouveau-y += dispnv50/curs907a.o
+nouveau-y += dispnv50/cursc37a.o
+
+nouveau-y += dispnv50/oimm.o
+nouveau-y += dispnv50/oimm507b.o
+
+nouveau-y += dispnv50/ovly.o
+nouveau-y += dispnv50/ovly507e.o
+nouveau-y += dispnv50/ovly827e.o
+nouveau-y += dispnv50/ovly907e.o
+nouveau-y += dispnv50/ovly917e.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
new file mode 100644
index 0000000000..93f8f4f645
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -0,0 +1,267 @@
+#ifndef __NV50_KMS_ATOM_H__
+#define __NV50_KMS_ATOM_H__
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+#include <drm/drm_atomic.h>
+#include "crc.h"
+
+struct nouveau_encoder;
+
+struct nv50_atom {
+ struct drm_atomic_state state;
+
+ struct list_head outp;
+ bool lock_core;
+ bool flush_disable;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+ struct drm_crtc_state state;
+
+ struct {
+ u32 mask;
+ u32 owned;
+ u32 olut;
+ } wndw;
+
+ struct {
+ u16 iW;
+ u16 iH;
+ u16 oW;
+ u16 oH;
+ } view;
+
+ struct nv50_head_mode {
+ bool interlace;
+ u32 clock;
+ struct {
+ u16 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ } h;
+ struct {
+ u32 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ u16 blank2s;
+ u16 blank2e;
+ u16 blankus;
+ } v;
+ } mode;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 buffer:1;
+ u8 mode:4;
+ u16 size:11;
+ u8 range:2;
+ u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size, void __iomem *);
+ } olut;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 blockh:4;
+ u16 blocks:12;
+ u32 pitch:20;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } core;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 layout:2;
+ u8 format:8;
+ } curs;
+
+ struct {
+ u8 depth;
+ u8 cpp;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } base;
+
+ struct {
+ u8 cpp;
+ } ovly;
+
+ struct {
+ bool enable:1;
+ u8 bits:2;
+ u8 mode:4;
+ } dither;
+
+ struct {
+ struct {
+ u16 cos:12;
+ u16 sin:12;
+ } sat;
+ } procamp;
+
+ struct {
+ u8 nhsync:1;
+ u8 nvsync:1;
+ u8 depth:4;
+ u8 crc_raster:2;
+ u8 bpc;
+ } or;
+
+ struct nv50_crc_atom crc;
+
+ /* Currently only used for MST */
+ struct {
+ int pbn;
+ u8 tu:6;
+ } dp;
+
+ union nv50_head_atom_mask {
+ struct {
+ bool olut:1;
+ bool core:1;
+ bool curs:1;
+ bool view:1;
+ bool mode:1;
+ bool base:1;
+ bool ovly:1;
+ bool dither:1;
+ bool procamp:1;
+ bool crc:1;
+ bool or:1;
+ };
+ u16 mask;
+ } set, clr;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(statec))
+ return (void *)statec;
+ return nv50_head_atom(statec);
+}
+
+static inline struct drm_encoder *
+nv50_head_atom_get_encoder(struct nv50_head_atom *atom)
+{
+ struct drm_encoder *encoder;
+
+ /* We only ever have a single encoder */
+ drm_for_each_encoder_mask(encoder, atom->state.crtc->dev,
+ atom->state.encoder_mask)
+ return encoder;
+
+ return NULL;
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+ struct drm_plane_state state;
+
+ struct drm_property_blob *ilut;
+ bool visible;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ bool awaken:1;
+ } ntfy;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ u32 acquire;
+ u32 release;
+ } sema;
+
+ struct {
+ u32 handle;
+ struct {
+ u64 offset:40;
+ u8 buffer:1;
+ u8 enable:2;
+ u8 mode:4;
+ u16 size:11;
+ u8 range:2;
+ u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size,
+ void __iomem *);
+ } i;
+ } xlut;
+
+ struct {
+ u32 matrix[12];
+ bool valid;
+ } csc;
+
+ struct {
+ u8 mode:2;
+ u8 interval:4;
+
+ u8 colorspace:2;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 blockh:4;
+ u16 blocks[3];
+ u32 pitch[3];
+ u16 w;
+ u16 h;
+
+ u32 handle[6];
+ u64 offset[6];
+ } image;
+
+ struct {
+ u16 sx;
+ u16 sy;
+ u16 sw;
+ u16 sh;
+ u16 dw;
+ u16 dh;
+ } scale;
+
+ struct {
+ u16 x;
+ u16 y;
+ } point;
+
+ struct {
+ u8 depth;
+ u8 k1;
+ u8 src_color:4;
+ u8 dst_color:4;
+ } blend;
+
+ union nv50_wndw_atom_mask {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool xlut:1;
+ bool csc:1;
+ bool image:1;
+ bool scale:1;
+ bool point:1;
+ bool blend:1;
+ };
+ u8 mask;
+ } set, clr;
+};
+#endif
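
For illustration only, not part of the patch: the set/clr members above overlay named one-bit flags with an integer view of the same storage, so state can be dirtied field by field and then tested or cleared as a whole. A minimal standalone sketch of the same idiom, using hypothetical field names and a C11 anonymous struct:

#include <stdio.h>
#include <stdbool.h>

/* Same idiom as nv50_head_atom_mask: named dirty bits share storage
 * with an integer view of the whole set. */
union dirty_mask {
	struct {
		bool mode : 1;
		bool lut  : 1;
		bool curs : 1;
	};
	unsigned int mask;
};

int main(void)
{
	union dirty_mask set = { .mask = 0 };

	set.mode = true;		/* flag individual fields by name */
	set.curs = true;

	if (set.mask)			/* cheap "anything dirty?" test */
		printf("dirty bits: 0x%x\n", set.mask);

	set.mask = 0;			/* clear everything at once */
	return 0;
}

In this sketch the integer view is only compared against zero or cleared wholesale, so the implementation-defined bit layout of the bool bitfields does not matter.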
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c
new file mode 100644
index 0000000000..7c752acf2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/class.h>
+
+int
+nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+ struct {
+ s32 oclass;
+ int version;
+ int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+ } bases[] = {
+ { GK110_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
+ { GK104_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
+ { GF110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
+ { GT214_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
+ { GT200_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
+ { G82_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
+ { NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid;
+
+ cid = nvif_mclass(&disp->disp->object, bases);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported base class\n");
+ return cid;
+ }
+
+ return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
+}
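
For illustration only, not part of the patch: nv50_base_new() above selects a constructor by walking a newest-first table of display class IDs and letting nvif_mclass() report the first one the display object supports. A rough standalone sketch of that fallback-table pattern, with a hypothetical supports() predicate standing in for the object query and the class numbers used purely as labels:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for asking the display object whether it
 * implements a given class; nvif_mclass() plays this role in the patch. */
static bool supports(int oclass)
{
	return oclass <= 0x827c;	/* pretend only the older classes exist */
}

struct impl {
	int oclass;
	const char *ctor;		/* constructor name, for printing only */
};

int main(void)
{
	/* Newest first, so the most capable supported class wins. */
	static const struct impl impls[] = {
		{ 0x917c, "base917c_new" },
		{ 0x907c, "base907c_new" },
		{ 0x827c, "base827c_new" },
		{ 0x507c, "base507c_new" },
		{ 0, NULL }
	};

	for (int i = 0; impls[i].oclass; i++) {
		if (supports(impls[i].oclass)) {
			printf("using %s (class 0x%04x)\n",
			       impls[i].ctor, impls[i].oclass);
			return 0;
		}
	}
	fprintf(stderr, "no supported class\n");
	return 1;
}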
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
new file mode 100644
index 0000000000..085bd3aeb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -0,0 +1,27 @@
+#ifndef __NV50_KMS_BASE_H__
+#define __NV50_KMS_BASE_H__
+#include "wndw.h"
+
+int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int base507c_new_(const struct nv50_wndw_func *, const u32 *format,
+ struct nouveau_drm *, int head, s32 oclass,
+ u32 interlock_data, struct nv50_wndw **);
+extern const u32 base507c_format[];
+int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_sema_clr(struct nv50_wndw *);
+int base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_xlut_clr(struct nv50_wndw *);
+
+int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int base907c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+extern const struct nv50_wndw_func base907c;
+
+int base917c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
new file mode 100644
index 0000000000..70c62b8612
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/if0014.h>
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl507c.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_bo.h"
+
+int
+base507c_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, UPDATE, interlock[NV50_DISP_INTERLOCK_CORE]);
+ return PUSH_KICK(push);
+}
+
+int
+base507c_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+ NVDEF(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING) |
+ NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0));
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, 0x00000000);
+ return 0;
+}
+
+static int
+base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+ NVVAL(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ if (asyw->image.format == NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+ PUSH_MTHD(push, NV507C, SET_PROCESSING,
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
+ } else {
+ PUSH_MTHD(push, NV507C, SET_PROCESSING,
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
+ }
+
+ PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV507C, SURFACE_SET_SIZE(0),
+ NVVAL(NV507C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV507C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV507C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV507C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, LAYOUT, FRM) |
+ NVVAL(NV507C, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+ return 0;
+}
+
+int
+base507c_xlut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+ NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+ return 0;
+}
+
+int
+base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+ NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, USE_CORE_LUT));
+ return 0;
+}
+
+int
+base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
+ struct nvif_device *device)
+{
+ s64 time = nvif_msec(device, 2000ULL,
+ if (NVBO_TD32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0, STATUS, ==, BEGUN))
+ break;
+ usleep_range(1, 2);
+ );
+ return time < 0 ? time : 0;
+}
+
+int
+base507c_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+ return 0;
+}
+
+int
+base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_NOTIFIER_CONTROL,
+ NVVAL(NV507C, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+ NVVAL(NV507C, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 2),
+
+ SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle);
+ return 0;
+}
+
+void
+base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
+{
+ NVBO_WR32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0,
+ NVDEF(NV_DISP_BASE_NOTIFIER_1, _0, STATUS, NOT_BEGUN));
+}
+
+int
+base507c_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+ return 0;
+}
+
+int
+base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+ SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+ SET_SEMAPHORE_RELEASE, asyw->sema.release,
+ SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+ return 0;
+}
+
+void
+base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->base.cpp = 0;
+}
+
+int
+base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ const struct drm_framebuffer *fb = asyw->state.fb;
+ int ret;
+
+ ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+
+ if (!wndw->func->ilut) {
+ if ((asyh->base.cpp != 1) ^ (fb->format->cpp[0] != 1))
+ asyh->state.color_mgmt_changed = true;
+ }
+
+ asyh->base.depth = fb->format->depth;
+ asyh->base.cpp = fb->format->cpp[0];
+ asyh->base.x = asyw->state.src.x1 >> 16;
+ asyh->base.y = asyw->state.src.y1 >> 16;
+ asyh->base.w = asyw->state.fb->width;
+ asyh->base.h = asyw->state.fb->height;
+
+ /* Some newer formats, esp FP16 ones, don't have a
+ * "depth". There's nothing that really makes sense there
+ * either, so just set it to the implicit bit count.
+ */
+ if (!asyh->base.depth)
+ asyh->base.depth = asyh->base.cpp * 8;
+
+ return 0;
+}
+
+const u32
+base507c_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ 0
+};
+
+static const struct nv50_wndw_func
+base507c = {
+ .acquire = base507c_acquire,
+ .release = base507c_release,
+ .sema_set = base507c_sema_set,
+ .sema_clr = base507c_sema_clr,
+ .ntfy_reset = base507c_ntfy_reset,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .olut_core = 1,
+ .xlut_set = base507c_xlut_set,
+ .xlut_clr = base507c_xlut_clr,
+ .image_set = base507c_image_set,
+ .image_clr = base507c_image_clr,
+ .update = base507c_update,
+};
+
+int
+base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
+ struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
+ struct nv50_wndw **pwndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = head,
+ };
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ struct nv50_disp *disp50 = nv50_disp(drm->dev);
+ struct nv50_wndw *wndw;
+ int ret;
+
+ ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+ "base", head, format, BIT(head),
+ NV50_DISP_INTERLOCK_BASE, interlock_data, &wndw);
+ if (*pwndw = wndw, ret)
+ return ret;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
+ &oclass, head, &args, sizeof(args),
+ disp50->sync->offset, &wndw->wndw);
+ if (ret) {
+ NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ wndw->ntfy = NV50_DISP_BASE_NTFY(wndw->id);
+ wndw->sema = NV50_DISP_BASE_SEM0(wndw->id);
+ wndw->data = 0x00000000;
+ return 0;
+}
+
+int
+base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return base507c_new_(&base507c, base507c_format, drm, head, oclass,
+ 0x00000002 << (head * 8), pwndw);
+}
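
For illustration only, not part of the patch: base507c_ntfy_wait_begun() uses nvif_msec() to poll the notifier in the sync buffer until the hardware marks it BEGUN, giving up after two seconds. A generic userspace sketch of the same poll-with-timeout shape, built around a hypothetical poll_until() helper:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical helper: poll a condition until it holds or a timeout
 * (in milliseconds) expires -- roughly the shape nvif_msec() provides. */
static int poll_until(bool (*done)(void *), void *arg, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (done(arg))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		int64_t elapsed_ns =
			(int64_t)(now.tv_sec - start.tv_sec) * 1000000000LL +
			(now.tv_nsec - start.tv_nsec);
		if (elapsed_ns >= (int64_t)timeout_ms * 1000000LL)
			return -1;	/* timed out, mirrors nvif_msec() < 0 */
		usleep(1);		/* brief back-off, like usleep_range(1, 2) */
	}
}

static bool flag_set(void *arg)
{
	return *(volatile int *)arg != 0;	/* e.g. a notifier word going non-zero */
}

int main(void)
{
	int notifier = 1;			/* pretend the hardware already wrote it */
	return poll_until(flag_set, &notifier, 2000) ? 1 : 0;
}

A real caller would pass a predicate that reads the notifier word and checks it for the BEGUN status, much like the NVBO_TD32() test above.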
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
new file mode 100644
index 0000000000..093d4ba691
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827c.h>
+
+static int
+base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV827C, SET_PRESENT_CONTROL,
+ NVVAL(NV827C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVVAL(NV827C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV827C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+ if (asyw->image.format == NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+ PUSH_MTHD(push, NV827C, SET_PROCESSING,
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
+ } else {
+ PUSH_MTHD(push, NV827C, SET_PROCESSING,
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
+ }
+
+ PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+ SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+ SURFACE_SET_SIZE(0),
+ NVVAL(NV827C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV827C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV827C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV827C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+ return 0;
+}
+
+static const struct nv50_wndw_func
+base827c = {
+ .acquire = base507c_acquire,
+ .release = base507c_release,
+ .sema_set = base507c_sema_set,
+ .sema_clr = base507c_sema_clr,
+ .ntfy_reset = base507c_ntfy_reset,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .olut_core = 1,
+ .xlut_set = base507c_xlut_set,
+ .xlut_clr = base507c_xlut_clr,
+ .image_set = base827c_image_set,
+ .image_clr = base507c_image_clr,
+ .update = base507c_update,
+};
+
+int
+base827c_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return base507c_new_(&base827c, base507c_format, drm, head, oclass,
+ 0x00000002 << (head * 8), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
new file mode 100644
index 0000000000..e6b0417c32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907c.h>
+
+static int
+base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 10)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_PRESENT_CONTROL,
+ NVVAL(NV907C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NV907C, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE) |
+ NVVAL(NV907C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+ PUSH_MTHD(push, NV907C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+ SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+ SURFACE_SET_SIZE(0),
+ NVVAL(NV907C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV907C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV907C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV907C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+ return 0;
+}
+
+static int
+base907c_xlut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 6)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+ NVDEF(NV907C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907C, SET_OUTPUT_LUT_LO,
+ NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, 0x00000000);
+ return 0;
+}
+
+static int
+base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 6)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+ NVVAL(NV907C, SET_BASE_LUT_LO, ENABLE, asyw->xlut.i.enable) |
+ NVVAL(NV907C, SET_BASE_LUT_LO, MODE, asyw->xlut.i.mode),
+
+ SET_BASE_LUT_HI, asyw->xlut.i.offset >> 8,
+
+ SET_OUTPUT_LUT_LO,
+ NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, USE_CORE_LUT));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, asyw->xlut.handle);
+ return 0;
+}
+
+static void
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
+{
+ if (size == 1024)
+ asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+ else
+ asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
+ asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE;
+ asyw->xlut.i.load = head907d_olut_load;
+}
+
+static inline u32
+csc_drm_to_base(u64 in)
+{
+ /* base takes a 19-bit 2's complement value in S3.16 format */
+ bool sign = in & BIT_ULL(63);
+ u32 integer = (in >> 32) & 0x7fffffff;
+ u32 fraction = in & 0xffffffff;
+
+ if (integer >= 4) {
+ return (1 << 18) - (sign ? 0 : 1);
+ } else {
+ u32 ret = (integer << 16) | (fraction >> 16);
+ if (sign)
+ ret = -ret;
+ return ret & GENMASK(18, 0);
+ }
+}
+
+void
+base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ const struct drm_color_ctm *ctm)
+{
+ int i, j;
+
+ for (j = 0; j < 3; j++) {
+ for (i = 0; i < 4; i++) {
+ u32 *val = &asyw->csc.matrix[j * 4 + i];
+ /* DRM does not support constant offset, while
+ * HW CSC does. Skip it. */
+ if (i == 3) {
+ *val = 0;
+ } else {
+ *val = csc_drm_to_base(ctm->matrix[j * 3 + i]);
+ }
+ }
+ }
+}
+
+static int
+base907c_csc_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+ NVDEF(NV907C, SET_CSC_RED2RED, OWNER, CORE));
+ return 0;
+}
+
+static int
+base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+ NVDEF(NV907C, SET_CSC_RED2RED, OWNER, BASE) |
+ NVVAL(NV907C, SET_CSC_RED2RED, COEFF, asyw->csc.matrix[0]),
+
+ SET_CSC_GRN2RED, &asyw->csc.matrix[1], 11);
+ return 0;
+}
+
+const struct nv50_wndw_func
+base907c = {
+ .acquire = base507c_acquire,
+ .release = base507c_release,
+ .sema_set = base507c_sema_set,
+ .sema_clr = base507c_sema_clr,
+ .ntfy_reset = base507c_ntfy_reset,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = base907c_ilut,
+ .csc = base907c_csc,
+ .csc_set = base907c_csc_set,
+ .csc_clr = base907c_csc_clr,
+ .olut_core = true,
+ .ilut_size = 1024,
+ .xlut_set = base907c_xlut_set,
+ .xlut_clr = base907c_xlut_clr,
+ .image_set = base907c_image_set,
+ .image_clr = base507c_image_clr,
+ .update = base507c_update,
+};
+
+int
+base907c_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return base507c_new_(&base907c, base507c_format, drm, head, oclass,
+ 0x00000002 << (head * 4), pwndw);
+}
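
For illustration only, not part of the patch: csc_drm_to_base() above converts DRM's sign-magnitude S31.32 CTM coefficients into the 19-bit two's-complement S3.16 values the base channel consumes, clamping anything at or above 4.0. A self-contained check of that conversion, with the function body copied from the patch and the kernel macros replaced by simplified userspace stand-ins:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;
#define BIT_ULL(n)	(1ULL << (n))
#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))	/* simplified */

/* Copied from base907c.c: S31.32 sign-magnitude -> S3.16 two's complement. */
static u32 csc_drm_to_base(u64 in)
{
	bool sign = in & BIT_ULL(63);
	u32 integer = (in >> 32) & 0x7fffffff;
	u32 fraction = in & 0xffffffff;

	if (integer >= 4) {
		return (1 << 18) - (sign ? 0 : 1);
	} else {
		u32 ret = (integer << 16) | (fraction >> 16);
		if (sign)
			ret = -ret;
		return ret & GENMASK(18, 0);
	}
}

int main(void)
{
	assert(csc_drm_to_base(1ULL << 32) == 0x10000);	/*  1.0 */
	assert(csc_drm_to_base(1ULL << 31) == 0x08000);	/*  0.5 */
	assert(csc_drm_to_base(BIT_ULL(63) | (1ULL << 32)) == 0x70000);	/* -1.0 */
	assert(csc_drm_to_base(7ULL << 32) == 0x3ffff);	/* clamps just below +4.0 */
	return 0;
}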
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
new file mode 100644
index 0000000000..ca260509a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+#include "atom.h"
+
+static const u32
+base917c_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ 0
+};
+
+int
+base917c_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return base507c_new_(&base907c, base917c_format, drm, head, oclass,
+ 0x00000002 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
new file mode 100644
index 0000000000..abefc23434
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+void
+nv50_core_del(struct nv50_core **pcore)
+{
+ struct nv50_core *core = *pcore;
+ if (core) {
+ nv50_dmac_destroy(&core->chan);
+ kfree(*pcore);
+ *pcore = NULL;
+ }
+}
+
+int
+nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
+{
+ struct {
+ s32 oclass;
+ int version;
+ int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
+ } cores[] = {
+ { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
+ { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
+ { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
+ { GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GM200_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GM107_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GK110_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GK104_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+ { GF110_DISP_CORE_CHANNEL_DMA, 0, core907d_new },
+ { GT214_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+ { GT206_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+ { GT200_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+ { G82_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+ { NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid;
+
+ cid = nvif_mclass(&disp->disp->object, cores);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported core channel class\n");
+ return cid;
+ }
+
+ return cores[cid].new(drm, cores[cid].oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
new file mode 100644
index 0000000000..f75088186f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -0,0 +1,73 @@
+#ifndef __NV50_KMS_CORE_H__
+#define __NV50_KMS_CORE_H__
+#include "disp.h"
+#include "atom.h"
+#include "crc.h"
+#include <nouveau_encoder.h>
+
+struct nv50_core {
+ const struct nv50_core_func *func;
+ struct nv50_dmac chan;
+ bool assign_windows;
+};
+
+int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
+void nv50_core_del(struct nv50_core **);
+
+struct nv50_core_func {
+ int (*init)(struct nv50_core *);
+ void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+ int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
+ int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
+ struct nvif_device *);
+ int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
+
+ struct {
+ int (*owner)(struct nv50_core *);
+ } wndw;
+
+ const struct nv50_head_func *head;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ const struct nv50_crc_func *crc;
+#endif
+ const struct nv50_outp_func {
+ int (*ctrl)(struct nv50_core *, int or, u32 ctrl,
+ struct nv50_head_atom *);
+ /* XXX: Only used by SORs and PIORs for now */
+ void (*get_caps)(struct nv50_disp *,
+ struct nouveau_encoder *, int or);
+ } *dac, *pior, *sor;
+};
+
+int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
+ struct nv50_core **);
+int core507d_init(struct nv50_core *);
+void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_read_caps(struct nv50_disp *disp);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
+int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+int core507d_update(struct nv50_core *, u32 *, bool);
+
+extern const struct nv50_outp_func dac507d;
+extern const struct nv50_outp_func sor507d;
+extern const struct nv50_outp_func pior507d;
+
+int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp);
+extern const struct nv50_outp_func dac907d;
+extern const struct nv50_outp_func sor907d;
+
+int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
+int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+int corec37d_update(struct nv50_core *, u32 *, bool);
+int corec37d_wndw_owner(struct nv50_core *);
+extern const struct nv50_outp_func sorc37d;
+
+int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
new file mode 100644
index 0000000000..e5bb5ca950
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/if0014.h>
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl507d.h>
+
+#include "nouveau_bo.h"
+
+int
+core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NV507D, UPDATE, interlock[NV50_DISP_INTERLOCK_BASE] |
+ interlock[NV50_DISP_INTERLOCK_OVLY] |
+ NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
+ NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
+ NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE),
+
+ SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+
+ return PUSH_KICK(push);
+}
+
+int
+core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
+ struct nvif_device *device)
+{
+ s64 time = nvif_msec(device, 2000ULL,
+ if (NVBO_TD32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ return time < 0 ? time : 0;
+}
+
+void
+core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
+{
+ NVBO_WR32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0,
+ NVDEF(NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, FALSE));
+}
+
+int
+core507d_read_caps(struct nv50_disp *disp)
+{
+ struct nvif_push *push = disp->core->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+
+ PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
+
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+
+ return PUSH_KICK(push);
+}
+
+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1,
+ NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
+int
+core507d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+core507d = {
+ .init = core507d_init,
+ .ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
+ .ntfy_wait_done = core507d_ntfy_wait_done,
+ .update = core507d_update,
+ .head = &head507d,
+ .dac = &dac507d,
+ .sor = &sor507d,
+ .pior = &pior507d,
+};
+
+int
+core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
+ s32 oclass, struct nv50_core **pcore)
+{
+ struct nvif_disp_chan_v0 args = {};
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_core *core;
+ int ret;
+
+ if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
+ return -ENOMEM;
+ core->func = func;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ &oclass, 0, &args, sizeof(args),
+ disp->sync->offset, &core->chan);
+ if (ret) {
+ NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&core507d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core827d.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
new file mode 100644
index 0000000000..2e0c1c536a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static const struct nv50_core_func
+core827d = {
+ .init = core507d_init,
+ .ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
+ .ntfy_wait_done = core507d_ntfy_wait_done,
+ .update = core507d_update,
+ .head = &head827d,
+ .dac = &dac507d,
+ .sor = &sor507d,
+ .pior = &pior507d,
+};
+
+int
+core827d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&core827d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
new file mode 100644
index 0000000000..8564d4dffa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl907d.h>
+
+#include "nouveau_bo.h"
+
+int
+core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4,
+ NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
+static const struct nv50_core_func
+core907d = {
+ .init = core507d_init,
+ .ntfy_init = core507d_ntfy_init,
+ .caps_init = core907d_caps_init,
+ .ntfy_wait_done = core507d_ntfy_wait_done,
+ .update = core507d_update,
+ .head = &head907d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crc907d,
+#endif
+ .dac = &dac907d,
+ .sor = &sor907d,
+};
+
+int
+core907d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&core907d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
new file mode 100644
index 0000000000..1cd3a2a35d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static const struct nv50_core_func
+core917d = {
+ .init = core507d_init,
+ .ntfy_init = core507d_ntfy_init,
+ .caps_init = core907d_caps_init,
+ .ntfy_wait_done = core507d_ntfy_wait_done,
+ .update = core507d_update,
+ .head = &head917d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crc907d,
+#endif
+ .dac = &dac907d,
+ .sor = &sor907d,
+};
+
+int
+core917d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&core917d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
new file mode 100644
index 0000000000..42f877f2ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/class.h>
+#include <nvif/pushc37b.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/clc37d.h>
+
+#include <nouveau_bo.h>
+
+int
+corec37d_wndw_owner(struct nv50_core *core)
+{
+ struct nvif_push *push = core->chan.push;
+ const u32 windows = 8; /*XXX*/
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, windows * 2)))
+ return ret;
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_CONTROL(i),
+ NVDEF(NVC37D, WINDOW_SET_CONTROL, OWNER, HEAD(i >> 1)));
+ }
+
+ return 0;
+}
+
+int
+corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NVC37D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 4) |
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NVC37D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+ PUSH_MTHD(push, NVC37D, UPDATE, 0x00000001 |
+ NVDEF(NVC37D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVC37D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
+}
+
+int
+corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
+ struct nvif_device *device)
+{
+ s64 time = nvif_msec(device, 2000ULL,
+ if (NVBO_TD32(bo, offset, NV_DISP_NOTIFIER, _0, STATUS, ==, FINISHED))
+ break;
+ usleep_range(1, 2);
+ );
+ return time < 0 ? time : 0;
+}
+
+void
+corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
+{
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _0,
+ NVDEF(NV_DISP_NOTIFIER, _0, STATUS, NOT_BEGUN));
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _1, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _2, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _3, 0);
+}
+
+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ int ret;
+
+ ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
+ GV100_DISP_CAPS, NULL, 0, &disp->caps);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to init notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = nvif_object_map(&disp->caps, NULL, 0);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to map notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+corec37d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = core->chan.push;
+ const u32 windows = 8; /*XXX*/
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
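+ /* Set per-window format and usage bounds (formats, max fetch width, LUT, scaler taps). */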
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, YUV_PACKED422, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_LUT, USAGE_1025) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+corec37d = {
+ .init = corec37d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headc37d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcc37d,
+#endif
+};
+
+int
+corec37d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&corec37d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
new file mode 100644
index 0000000000..53b1e2a569
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
+corec57d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = core->chan.push;
+ const u32 windows = 8; /*XXX*/
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+corec57d = {
+ .init = corec57d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headc57d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcc57d,
+#endif
+};
+
+int
+corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&corec57d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
new file mode 100644
index 0000000000..9c942fbd83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: MIT
+#include <linux/string.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_work.h>
+
+#include <nvif/class.h>
+#include <nvif/cl0002.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl907d.h>
+
+#include "nouveau_drv.h"
+#include "core.h"
+#include "head.h"
+#include "wndw.h"
+#include "handles.h"
+#include "crc.h"
+
+static const char * const nv50_crc_sources[] = {
+ [NV50_CRC_SOURCE_NONE] = "none",
+ [NV50_CRC_SOURCE_AUTO] = "auto",
+ [NV50_CRC_SOURCE_RG] = "rg",
+ [NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
+ [NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
+ [NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
+};
+
+static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
+{
+ int i;
+
+ if (!buf) {
+ *s = NV50_CRC_SOURCE_NONE;
+ return 0;
+ }
+
+ i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
+ if (i < 0)
+ return i;
+
+ *s = i;
+ return 0;
+}
+
+int
+nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ enum nv50_crc_source source;
+
+ if (nv50_crc_parse_source(source_name, &source) < 0) {
+ NV_DEBUG(drm, "unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+ return 0;
+}
+
+const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
+{
+ *count = ARRAY_SIZE(nv50_crc_sources);
+ return nv50_crc_sources;
+}
+
+static void
+nv50_crc_program_ctx(struct nv50_head *head,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+ struct nv50_core *core = disp->core;
+ u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };
+
+ core->func->crc->set_ctx(head, ctx);
+ core->func->update(core, interlock, false);
+}
+
+static void nv50_crc_ctx_flip_work(struct kthread_work *base)
+{
+ struct drm_vblank_work *work = to_drm_vblank_work(base);
+ struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
+ struct nv50_head *head = container_of(crc, struct nv50_head, crc);
+ struct drm_crtc *crtc = &head->base.base;
+ struct drm_device *dev = crtc->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ const uint64_t start_vbl = drm_crtc_vblank_count(crtc);
+ uint64_t end_vbl;
+ u8 new_idx = crc->ctx_idx ^ 1;
+
+ /*
+ * We don't want to accidentally wait for longer than the vblank, so if
+ * we can't grab the lock, try again on the next vblank.
+ */
+ if (!mutex_trylock(&disp->mutex)) {
+ drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name);
+ drm_vblank_work_schedule(work, start_vbl + 1, true);
+ return;
+ }
+
+ drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n",
+ crtc->name, crc->ctx_idx, new_idx);
+
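+ /*
+ * Detach the current notifier context before attaching the new one;
+ * programming a NULL context first is presumably what lets the
+ * hardware finish with the outgoing notifier before the switch.
+ */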
+ nv50_crc_program_ctx(head, NULL);
+ nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
+ mutex_unlock(&disp->mutex);
+
+ end_vbl = drm_crtc_vblank_count(crtc);
+ if (unlikely(end_vbl != start_vbl))
+ NV_ERROR(nouveau_drm(dev),
+ "Failed to flip CRC context on %s on time (%llu > %llu)\n",
+ crtc->name, end_vbl, start_vbl);
+
+ spin_lock_irq(&crc->lock);
+ crc->ctx_changed = true;
+ spin_unlock_irq(&crc->lock);
+}
+
+static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
+{
+ memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
+}
+
+static void
+nv50_crc_get_entries(struct nv50_head *head,
+ const struct nv50_crc_func *func,
+ enum nv50_crc_source source)
+{
+ struct drm_crtc *crtc = &head->base.base;
+ struct nv50_crc *crc = &head->crc;
+ u32 output_crc;
+
+ while (crc->entry_idx < func->num_entries) {
+ /*
+ * While Nvidia's documentation says CRCs are written on each
+ * subsequent vblank after being enabled, in practice they
+ * aren't written immediately, so treat a zero entry as
+ * not-yet-written and stop.
+ */
+ output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
+ source, crc->entry_idx);
+ if (!output_crc)
+ return;
+
+ drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
+ crc->frame++;
+ crc->entry_idx++;
+ }
+}
+
+void nv50_crc_handle_vblank(struct nv50_head *head)
+{
+ struct drm_crtc *crtc = &head->base.base;
+ struct nv50_crc *crc = &head->crc;
+ const struct nv50_crc_func *func =
+ nv50_disp(head->base.base.dev)->core->func->crc;
+ struct nv50_crc_notifier_ctx *ctx;
+ bool need_reschedule = false;
+
+ if (!func)
+ return;
+
+ /*
+ * No events are lost if we defer reporting CRCs until the next
+ * vblank, so only report them when the lock is uncontended; missing
+ * an actual vblank event would be worse.
+ */
+ if (!spin_trylock(&crc->lock))
+ return;
+
+ if (!crc->src)
+ goto out;
+
+ ctx = &crc->ctx[crc->ctx_idx];
+ if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
+ nv50_crc_get_entries(head, func, crc->src);
+
+ crc->ctx_idx ^= 1;
+ crc->entry_idx = 0;
+ crc->ctx_changed = false;
+
+ /*
+ * Unfortunately when notifier contexts are changed during CRC
+ * capture, we will inevitably lose the CRC entry for the
+ * frame where the hardware actually latched onto the first
+ * UPDATE. According to Nvidia's hardware engineers, there's
+ * no workaround for this.
+ *
+ * Now, we could try to be smart here and calculate the number
+ * of missed CRCs based on audit timestamps, but those were
+ * removed starting with Volta. Since we always flush our
+ * updates back-to-back without waiting, we'll just be
+ * optimistic and assume we always miss exactly one frame.
+ */
+ drm_dbg_kms(head->base.base.dev,
+ "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
+ head->base.index, crc->frame);
+ crc->frame++;
+
+ nv50_crc_reset_ctx(ctx);
+ need_reschedule = true;
+ }
+
+ nv50_crc_get_entries(head, func, crc->src);
+
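+ /*
+ * Schedule the next context flip so it happens before the current
+ * notifier fills up: flip_threshold frames from now, minus the
+ * entries this context has already accumulated.
+ */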
+ if (need_reschedule)
+ drm_vblank_work_schedule(&crc->flip_work,
+ drm_crtc_vblank_count(crtc)
+ + crc->flip_threshold
+ - crc->entry_idx,
+ true);
+
+out:
+ spin_unlock(&crc->lock);
+}
+
+static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
+ const struct nv50_crc_func *func,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct drm_device *dev = head->base.base.dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ s64 ret;
+
+ ret = nvif_msec(&drm->client.device, 50,
+ if (func->ctx_finished(head, ctx)) break;);
+ if (ret == -ETIMEDOUT)
+ NV_ERROR(drm,
+ "CRC notifier ctx for head %d not finished after 50ms\n",
+ head->base.index);
+ else if (ret)
+ NV_ATOMIC(drm,
+ "CRC notifier ctx for head-%d finished after %lldns\n",
+ head->base.index, ret);
+}
+
+void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ struct nv50_crc *crc = &head->crc;
+
+ if (!asyh->clr.crc)
+ continue;
+
+ spin_lock_irq(&crc->lock);
+ crc->src = NV50_CRC_SOURCE_NONE;
+ spin_unlock_irq(&crc->lock);
+
+ drm_crtc_vblank_put(crtc);
+ drm_vblank_work_cancel_sync(&crc->flip_work);
+
+ NV_ATOMIC(nouveau_drm(crtc->dev),
+ "CRC reporting on vblank for head-%d disabled\n",
+ head->base.index);
+
+ /* CRC generation is still enabled in hardware; we'll just report
+ * any remaining CRC entries ourselves after it gets disabled.
+ */
+ }
+}
+
+void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_crc *crc = &head->crc;
+ int i;
+
+ if (!asyh->set.crc)
+ continue;
+
+ crc->entry_idx = 0;
+ crc->ctx_changed = false;
+ for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
+ nv50_crc_reset_ctx(&crc->ctx[i]);
+ }
+}
+
+void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
+{
+ const struct nv50_crc_func *func =
+ nv50_disp(state->dev)->core->func->crc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_crc *crc = &head->crc;
+ struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];
+
+ if (!asyh->clr.crc)
+ continue;
+
+ if (crc->ctx_changed) {
+ nv50_crc_wait_ctx_finished(head, func, ctx);
+ ctx = &crc->ctx[crc->ctx_idx ^ 1];
+ }
+ nv50_crc_wait_ctx_finished(head, func, ctx);
+ }
+}
+
+void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ struct nv50_crc *crc = &head->crc;
+ u64 vbl_count;
+
+ if (!asyh->set.crc)
+ continue;
+
+ drm_crtc_vblank_get(crtc);
+
+ spin_lock_irq(&crc->lock);
+ vbl_count = drm_crtc_vblank_count(crtc);
+ crc->frame = vbl_count;
+ crc->src = asyh->crc.src;
+ drm_vblank_work_schedule(&crc->flip_work,
+ vbl_count + crc->flip_threshold,
+ true);
+ spin_unlock_irq(&crc->lock);
+
+ NV_ATOMIC(nouveau_drm(crtc->dev),
+ "CRC reporting on vblank for head-%d enabled\n",
+ head->base.index);
+ }
+}
+
+int nv50_crc_atomic_check_head(struct nv50_head *head,
+ struct nv50_head_atom *asyh,
+ struct nv50_head_atom *armh)
+{
+ struct nv50_atom *atom = nv50_atom(asyh->state.state);
+ bool changed = armh->crc.src != asyh->crc.src;
+
+ if (!armh->crc.src && !asyh->crc.src) {
+ asyh->set.crc = false;
+ asyh->clr.crc = false;
+ return 0;
+ }
+
+ if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) {
+ asyh->clr.crc = armh->crc.src && armh->state.active;
+ asyh->set.crc = asyh->crc.src && asyh->state.active;
+ if (changed)
+ asyh->set.or |= armh->or.crc_raster !=
+ asyh->or.crc_raster;
+
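+ /*
+ * Changing sources while active needs the old CRC state flushed
+ * out in its own update before the new source is programmed.
+ */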
+ if (asyh->clr.crc && asyh->set.crc)
+ atom->flush_disable = true;
+ } else {
+ asyh->set.crc = false;
+ asyh->clr.crc = false;
+ }
+
+ return 0;
+}
+
+void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ if (atom->flush_disable)
+ return;
+
+ for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_outp_atom *outp_atom;
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder, *enc;
+
+ enc = nv50_head_atom_get_encoder(armh);
+ if (!enc)
+ continue;
+
+ outp = nv50_real_outp(enc);
+ if (!outp)
+ continue;
+
+ encoder = &outp->base.base;
+
+ if (!asyh->clr.crc)
+ continue;
+
+ /*
+ * Re-programming ORs can't be done in the same flush as
+ * disabling CRCs
+ */
+ list_for_each_entry(outp_atom, &atom->outp, head) {
+ if (outp_atom->encoder == encoder) {
+ if (outp_atom->set.mask) {
+ atom->flush_disable = true;
+ return;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+}
+
+static enum nv50_crc_source_type
+nv50_crc_source_type(struct nouveau_encoder *outp,
+ enum nv50_crc_source source)
+{
+ struct dcb_output *dcbe = outp->dcb;
+
+ switch (source) {
+ case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
+ case NV50_CRC_SOURCE_RG: return NV50_CRC_SOURCE_TYPE_RG;
+ default: break;
+ }
+
+ if (dcbe->location != DCB_LOC_ON_CHIP)
+ return NV50_CRC_SOURCE_TYPE_PIOR;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_DP: return NV50_CRC_SOURCE_TYPE_SF;
+ case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
+ default: return NV50_CRC_SOURCE_TYPE_SOR;
+ }
+}
+
+void nv50_crc_atomic_set(struct nv50_head *head,
+ struct nv50_head_atom *asyh)
+{
+ struct drm_crtc *crtc = &head->base.base;
+ struct drm_device *dev = crtc->dev;
+ struct nv50_crc *crc = &head->crc;
+ const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder;
+
+ encoder = nv50_head_atom_get_encoder(asyh);
+ if (!encoder)
+ return;
+
+ outp = nv50_real_outp(encoder);
+ if (!outp)
+ return;
+
+ func->set_src(head, outp->outp.or.id, nv50_crc_source_type(outp, asyh->crc.src),
+ &crc->ctx[crc->ctx_idx]);
+}
+
+void nv50_crc_atomic_clr(struct nv50_head *head)
+{
+ const struct nv50_crc_func *func =
+ nv50_disp(head->base.base.dev)->core->func->crc;
+
+ func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL);
+}
+
+static inline int
+nv50_crc_raster_type(enum nv50_crc_source source)
+{
+ switch (source) {
+ case NV50_CRC_SOURCE_NONE:
+ case NV50_CRC_SOURCE_AUTO:
+ case NV50_CRC_SOURCE_RG:
+ case NV50_CRC_SOURCE_OUTP_ACTIVE:
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
+ case NV50_CRC_SOURCE_OUTP_COMPLETE:
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
+ case NV50_CRC_SOURCE_OUTP_INACTIVE:
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
+ }
+
+ return 0;
+}
+
+/* We handle mapping the memory for CRC notifiers ourselves, since each
+ * notifier needs its own handle.
+ */
+static inline int
+nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
+ struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
+{
+ struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
+ int ret;
+
+ ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
+ if (ret)
+ return ret;
+
+ ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
+ NV50_DISP_HANDLE_CRC_CTX(head, idx),
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
+ .start = ctx->mem.addr,
+ .limit = ctx->mem.addr
+ + ctx->mem.size - 1,
+ }, sizeof(struct nv_dma_v0),
+ &ctx->ntfy);
+ if (ret)
+ goto fail_fini;
+
+ return 0;
+
+fail_fini:
+ nvif_mem_dtor(&ctx->mem);
+ return ret;
+}
+
+static inline void
+nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
+{
+ nvif_object_dtor(&ctx->ntfy);
+ nvif_mem_dtor(&ctx->mem);
+}
+
+int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_crc *crc = &head->crc;
+ const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
+ struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
+ struct nv50_head_atom *asyh;
+ struct drm_crtc_state *crtc_state;
+ enum nv50_crc_source source;
+ int ret = 0, ctx_flags = 0, i;
+
+ ret = nv50_crc_parse_source(source_str, &source);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the user to accidentally interrupt us while we're
+ * disabling CRCs, so only take interruptible locks when enabling.
+ */
+ if (source)
+ ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
+ drm_modeset_acquire_init(&ctx, ctx_flags);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_acquire_fini;
+ }
+ state->acquire_ctx = &ctx;
+
+ if (source) {
+ for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
+ ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
+ func->notifier_len, i);
+ if (ret)
+ goto out_ctx_fini;
+ }
+ }
+
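+ /* Standard atomic retry dance: on -EDEADLK, clear the state, back off, and retry. */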
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ if (ret == -EDEADLK)
+ goto deadlock;
+ else if (ret)
+ goto out_drop_locks;
+ }
+ asyh = nv50_head_atom(crtc_state);
+ asyh->crc.src = source;
+ asyh->or.crc_raster = nv50_crc_raster_type(source);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK)
+ goto deadlock;
+ else if (ret)
+ goto out_drop_locks;
+
+ if (!source) {
+ /*
+ * If the user specified a custom flip threshold through
+ * debugfs, reset it to the hardware default.
+ */
+ crc->flip_threshold = func->flip_threshold;
+ }
+
+out_drop_locks:
+ drm_modeset_drop_locks(&ctx);
+out_ctx_fini:
+ if (!source || ret) {
+ for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
+ nv50_crc_ctx_fini(&crc->ctx[i]);
+ }
+ drm_atomic_state_put(state);
+out_acquire_fini:
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
+
+deadlock:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+}
+
+static int
+nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
+{
+ struct nv50_head *head = m->private;
+ struct drm_crtc *crtc = &head->base.base;
+ struct nv50_crc *crc = &head->crc;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "%d\n", crc->flip_threshold);
+
+ drm_modeset_unlock(&crtc->mutex);
+ return ret;
+}
+
+static int
+nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nv50_crc_debugfs_flip_threshold_get,
+ inode->i_private);
+}
+
+static ssize_t
+nv50_crc_debugfs_flip_threshold_set(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct nv50_head *head = m->private;
+ struct nv50_head_atom *armh;
+ struct drm_crtc *crtc = &head->base.base;
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nv50_crc *crc = &head->crc;
+ const struct nv50_crc_func *func =
+ nv50_disp(crtc->dev)->core->func->crc;
+ int value, ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 10, &value);
+ if (ret)
+ return ret;
+
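+ /*
+ * -1 restores the hardware default threshold; any other value must be
+ * between 0 and that default.
+ */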
+ if (value > func->flip_threshold)
+ return -EINVAL;
+ else if (value == -1)
+ value = func->flip_threshold;
+ else if (value < -1)
+ return -EINVAL;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
+ if (ret)
+ return ret;
+
+ armh = nv50_head_atom(crtc->state);
+ if (armh->crc.src) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ NV_DEBUG(drm,
+ "Changing CRC flip threshold for next capture on head-%d to %d\n",
+ head->base.index, value);
+ crc->flip_threshold = value;
+ ret = len;
+
+out:
+ drm_modeset_unlock(&crtc->mutex);
+ return ret;
+}
+
+static const struct file_operations nv50_crc_flip_threshold_fops = {
+ .owner = THIS_MODULE,
+ .open = nv50_crc_debugfs_flip_threshold_open,
+ .read = seq_read,
+ .write = nv50_crc_debugfs_flip_threshold_set,
+ .release = single_release,
+};
+
+int nv50_head_crc_late_register(struct nv50_head *head)
+{
+ struct drm_crtc *crtc = &head->base.base;
+ const struct nv50_crc_func *func =
+ nv50_disp(crtc->dev)->core->func->crc;
+ struct dentry *root;
+
+ if (!func || !crtc->debugfs_entry)
+ return 0;
+
+ root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
+ debugfs_create_file("flip_threshold", 0644, root, head,
+ &nv50_crc_flip_threshold_fops);
+
+ return 0;
+}
+
+static inline void
+nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
+ struct nv50_head *head)
+{
+ struct nv50_crc *crc = &head->crc;
+
+ crc->flip_threshold = func->flip_threshold;
+ spin_lock_init(&crc->lock);
+ drm_vblank_work_init(&crc->flip_work, &head->base.base,
+ nv50_crc_ctx_flip_work);
+}
+
+void nv50_crc_init(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_crtc *crtc;
+ const struct nv50_crc_func *func = disp->core->func->crc;
+
+ if (!func)
+ return;
+
+ drm_for_each_crtc(crtc, dev)
+ nv50_crc_init_head(disp, func, nv50_head(crtc));
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
new file mode 100644
index 0000000000..4823f1fde2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV50_CRC_H__
+#define __NV50_CRC_H__
+
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank_work.h>
+
+#include <nvif/mem.h>
+#include <nvkm/subdev/bios.h>
+#include "nouveau_encoder.h"
+
+struct nv50_atom;
+struct nv50_disp;
+struct nv50_head;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+enum nv50_crc_source {
+ NV50_CRC_SOURCE_NONE = 0,
+ NV50_CRC_SOURCE_AUTO,
+ NV50_CRC_SOURCE_RG,
+ NV50_CRC_SOURCE_OUTP_ACTIVE,
+ NV50_CRC_SOURCE_OUTP_COMPLETE,
+ NV50_CRC_SOURCE_OUTP_INACTIVE,
+};
+
+/* RG -> SF (DP only)
+ * -> SOR
+ * -> PIOR
+ * -> DAC
+ */
+enum nv50_crc_source_type {
+ NV50_CRC_SOURCE_TYPE_NONE = 0,
+ NV50_CRC_SOURCE_TYPE_SOR,
+ NV50_CRC_SOURCE_TYPE_PIOR,
+ NV50_CRC_SOURCE_TYPE_DAC,
+ NV50_CRC_SOURCE_TYPE_RG,
+ NV50_CRC_SOURCE_TYPE_SF,
+};
+
+struct nv50_crc_notifier_ctx {
+ struct nvif_mem mem;
+ struct nvif_object ntfy;
+};
+
+struct nv50_crc_atom {
+ enum nv50_crc_source src;
+};
+
+struct nv50_crc_func {
+ int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type type,
+ struct nv50_crc_notifier_ctx *ctx);
+ int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
+ u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *,
+ enum nv50_crc_source, int idx);
+ bool (*ctx_finished)(struct nv50_head *,
+ struct nv50_crc_notifier_ctx *);
+ short flip_threshold;
+ short num_entries;
+ size_t notifier_len;
+};
+
+struct nv50_crc {
+ spinlock_t lock;
+ struct nv50_crc_notifier_ctx ctx[2];
+ struct drm_vblank_work flip_work;
+ enum nv50_crc_source src;
+
+ u64 frame;
+ short entry_idx;
+ short flip_threshold;
+ u8 ctx_idx : 1;
+ bool ctx_changed : 1;
+};
+
+void nv50_crc_init(struct drm_device *dev);
+int nv50_head_crc_late_register(struct nv50_head *);
+void nv50_crc_handle_vblank(struct nv50_head *head);
+
+int nv50_crc_verify_source(struct drm_crtc *, const char *, size_t *);
+const char *const *nv50_crc_get_sources(struct drm_crtc *, size_t *);
+int nv50_crc_set_source(struct drm_crtc *, const char *);
+
+int nv50_crc_atomic_check_head(struct nv50_head *, struct nv50_head_atom *,
+ struct nv50_head_atom *);
+void nv50_crc_atomic_check_outp(struct nv50_atom *atom);
+void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *);
+void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *);
+void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *);
+void nv50_crc_atomic_start_reporting(struct drm_atomic_state *);
+void nv50_crc_atomic_set(struct nv50_head *, struct nv50_head_atom *);
+void nv50_crc_atomic_clr(struct nv50_head *);
+
+extern const struct nv50_crc_func crc907d;
+extern const struct nv50_crc_func crcc37d;
+extern const struct nv50_crc_func crcc57d;
+
+#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
+struct nv50_crc {};
+struct nv50_crc_func {};
+struct nv50_crc_atom {};
+
+#define nv50_crc_verify_source NULL
+#define nv50_crc_get_sources NULL
+#define nv50_crc_set_source NULL
+
+static inline void nv50_crc_init(struct drm_device *dev) {}
+static inline int
+nv50_head_crc_late_register(struct nv50_head *head) { return 0; }
+static inline void nv50_crc_handle_vblank(struct nv50_head *head) {}
+
+static inline int
+nv50_crc_atomic_check_head(struct nv50_head *head,
+ struct nv50_head_atom *asyh,
+ struct nv50_head_atom *armh) { return 0; }
+static inline void nv50_crc_atomic_check_outp(struct nv50_atom *atom) {}
+static inline void
+nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state) {}
+static inline void
+nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state) {}
+static inline void
+nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state) {}
+static inline void
+nv50_crc_atomic_start_reporting(struct drm_atomic_state *state) {}
+static inline void
+nv50_crc_atomic_set(struct nv50_head *head, struct nv50_head_atom *state) {}
+static inline void
+nv50_crc_atomic_clr(struct nv50_head *head) {}
+
+#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
+#endif /* !__NV50_CRC_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
new file mode 100644
index 0000000000..f9ad641555
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: MIT
+#include <drm/drm_crtc.h>
+
+#include "crc.h"
+#include "core.h"
+#include "disp.h"
+#include "head.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+#define CRC907D_MAX_ENTRIES 255
+
+struct crc907d_notifier {
+ u32 status;
+ u32 :32; /* reserved */
+ struct crc907d_entry {
+ u32 status;
+ u32 compositor_crc;
+ u32 output_crc[2];
+ } entries[CRC907D_MAX_ENTRIES];
+} __packed;
+
+static int
+crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, WIDE_PIPE_CRC, ENABLE);
+ int ret;
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_PIOR:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, PIOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_DAC:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, DAC(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_RG:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, RG(i));
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SF(i));
+ break;
+ case NV50_CRC_SOURCE_NONE:
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE);
+ break;
+ }
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ if (source) {
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ } else {
+ PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
+ }
+
+ return 0;
+}
+
+static int
+crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+ return 0;
+}
+
+static u32 crc907d_get_entry(struct nv50_head *head,
+ struct nv50_crc_notifier_ctx *ctx,
+ enum nv50_crc_source source, int idx)
+{
+ struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
+
+ return ioread32_native(&notifier->entries[idx].output_crc[0]);
+}
+
+static bool crc907d_ctx_finished(struct nv50_head *head,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nouveau_drm *drm = nouveau_drm(head->base.base.dev);
+ struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
+ const u32 status = ioread32_native(&notifier->status);
+ const u32 overflow = status & 0x0000003e;
+
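+ /*
+ * Bit 0 of the notifier status indicates completion; the remaining
+ * bits in the overflow mask identify which unit overflowed.
+ */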
+ if (!(status & 0x00000001))
+ return false;
+
+ if (overflow) {
+ const char *engine = NULL;
+
+ switch (overflow) {
+ case 0x00000004: engine = "DSI"; break;
+ case 0x00000008: engine = "Compositor"; break;
+ case 0x00000010: engine = "CRC output 1"; break;
+ case 0x00000020: engine = "CRC output 2"; break;
+ }
+
+ if (engine)
+ NV_ERROR(drm,
+ "CRC notifier context for head %d overflowed on %s: %x\n",
+ head->base.index, engine, status);
+ else
+ NV_ERROR(drm,
+ "CRC notifier context for head %d overflowed: %x\n",
+ head->base.index, status);
+ }
+
+ NV_DEBUG(drm, "Head %d CRC context status: %x\n",
+ head->base.index, status);
+
+ return true;
+}
+
+const struct nv50_crc_func crc907d = {
+ .set_src = crc907d_set_src,
+ .set_ctx = crc907d_set_ctx,
+ .get_entry = crc907d_get_entry,
+ .ctx_finished = crc907d_ctx_finished,
+ .flip_threshold = CRC907D_MAX_ENTRIES - 10,
+ .num_entries = CRC907D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crc907d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
new file mode 100644
index 0000000000..f10f6c4844
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+#include <drm/drm_crtc.h>
+
+#include "crc.h"
+#include "crcc37d.h"
+#include "core.h"
+#include "disp.h"
+#include "head.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
+crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ int ret;
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_PIOR:
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, PIOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
+ break;
+ default:
+ break;
+ }
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ if (source) {
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ } else {
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), 0);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
+ }
+
+ return 0;
+}
+
+int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+ return 0;
+}
+
+u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx,
+ enum nv50_crc_source source, int idx)
+{
+ struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
+ struct crcc37d_entry __iomem *entry = &notifier->entries[idx];
+ u32 __iomem *crc_addr;
+
+ if (source == NV50_CRC_SOURCE_RG)
+ crc_addr = &entry->rg_crc;
+ else
+ crc_addr = &entry->output_crc[0];
+
+ return ioread32_native(crc_addr);
+}
+
+bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nouveau_drm *drm = nouveau_drm(head->base.base.dev);
+ struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
+ const u32 status = ioread32_native(&notifier->status);
+ const u32 overflow = status & 0x0000007e;
+
+ if (!(status & 0x00000001))
+ return false;
+
+ if (overflow) {
+ const char *engine = NULL;
+
+ switch (overflow) {
+ case 0x00000004: engine = "Front End"; break;
+ case 0x00000008: engine = "Compositor"; break;
+ case 0x00000010: engine = "RG"; break;
+ case 0x00000020: engine = "CRC output 1"; break;
+ case 0x00000040: engine = "CRC output 2"; break;
+ }
+
+ if (engine)
+ NV_ERROR(drm,
+ "CRC notifier context for head %d overflowed on %s: %x\n",
+ head->base.index, engine, status);
+ else
+ NV_ERROR(drm,
+ "CRC notifier context for head %d overflowed: %x\n",
+ head->base.index, status);
+ }
+
+ NV_DEBUG(drm, "Head %d CRC context status: %x\n",
+ head->base.index, status);
+
+ return true;
+}
+
+const struct nv50_crc_func crcc37d = {
+ .set_src = crcc37d_set_src,
+ .set_ctx = crcc37d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h
new file mode 100644
index 0000000000..5775137b83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __CRCC37D_H__
+#define __CRCC37D_H__
+
+#include <linux/types.h>
+
+#include "crc.h"
+
+#define CRCC37D_MAX_ENTRIES 2047
+#define CRCC37D_FLIP_THRESHOLD (CRCC37D_MAX_ENTRIES - 30)
+
+struct crcc37d_notifier {
+ u32 status;
+
+ /* reserved */
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+
+ struct crcc37d_entry {
+ u32 status[2];
+ u32:32; /* reserved */
+ u32 compositor_crc;
+ u32 rg_crc;
+ u32 output_crc[2];
+ u32:32; /* reserved */
+ } entries[CRCC37D_MAX_ENTRIES];
+} __packed;
+
+int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx);
+u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx,
+ enum nv50_crc_source source, int idx);
+bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx);
+
+#endif /* !__CRCC37D_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
new file mode 100644
index 0000000000..cc0130e3d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: MIT
+
+#include "crc.h"
+#include "crcc37d.h"
+#include "core.h"
+#include "disp.h"
+#include "head.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ int ret;
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
+ break;
+ default:
+ break;
+ }
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ if (source) {
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ } else {
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), 0);
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
+ }
+
+ return 0;
+}
+
+const struct nv50_crc_func crcc57d = {
+ .set_src = crcc57d_set_src,
+ .set_ctx = crcc37d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
new file mode 100644
index 0000000000..31d8b2e479
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+
+#include <nvif/class.h>
+
+int
+nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+ struct {
+ s32 oclass;
+ int version;
+ int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+ } curses[] = {
+ { GA102_DISP_CURSOR, 0, cursc37a_new },
+ { TU102_DISP_CURSOR, 0, cursc37a_new },
+ { GV100_DISP_CURSOR, 0, cursc37a_new },
+ { GK104_DISP_CURSOR, 0, curs907a_new },
+ { GF110_DISP_CURSOR, 0, curs907a_new },
+ { GT214_DISP_CURSOR, 0, curs507a_new },
+ { G82_DISP_CURSOR, 0, curs507a_new },
+ { NV50_DISP_CURSOR, 0, curs507a_new },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid;
+
+ cid = nvif_mclass(&disp->disp->object, curses);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported cursor immediate class\n");
+ return cid;
+ }
+
+ return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
new file mode 100644
index 0000000000..23aff5fd67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -0,0 +1,14 @@
+#ifndef __NV50_KMS_CURS_H__
+#define __NV50_KMS_CURS_H__
+#include "wndw.h"
+
+int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int curs507a_new_(const struct nv50_wimm_func *, struct nouveau_drm *,
+ int head, s32 oclass, u32 interlock_data,
+ struct nv50_wndw **);
+
+int curs907a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int cursc37a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
new file mode 100644
index 0000000000..a95ee5dcc2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+#include "core.h"
+#include "head.h"
+
+#include <nvif/if0014.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl507a.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+bool
+curs507a_space(struct nv50_wndw *wndw)
+{
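+ /* Wait up to 100ms for the cursor immediate channel to report at least 4 free slots. */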
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
+ if (NVIF_TV32(&wndw->wimm.base.user, NV507A, FREE, COUNT, >=, 4))
+ return true;
+ );
+
+ WARN_ON(1);
+ return false;
+}
+
+static int
+curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NV507A, UPDATE,
+ NVDEF(NV507A, UPDATE, INTERLOCK_WITH_CORE, DISABLE));
+ }
+ return ret;
+}
+
+static int
+curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT,
+ NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
+ }
+ return ret;
+}
+
+const struct nv50_wimm_func
+curs507a = {
+ .point = curs507a_point,
+ .update = curs507a_update,
+};
+
+static void
+curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw)
+{
+ u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
+ u32 offset = asyw->image.offset[0];
+ if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
+ asyh->curs.handle = handle;
+ asyh->curs.offset = offset;
+ asyh->set.curs = asyh->curs.visible;
+ }
+}
+
+static void
+curs507a_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.visible = false;
+}
+
+static int
+curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ struct nv50_head *head = nv50_head(asyw->state.crtc);
+ struct drm_framebuffer *fb = asyw->state.fb;
+ int ret;
+
+ ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ asyh->curs.visible = asyw->state.visible;
+ if (ret || !asyh->curs.visible)
+ return ret;
+
+ if (asyw->state.crtc_w != asyw->state.crtc_h) {
+ NV_ATOMIC(drm, "Plane width/height must be equal for cursors\n");
+ return -EINVAL;
+ }
+
+ if (asyw->image.w != asyw->state.crtc_w) {
+ NV_ATOMIC(drm, "Plane width must be equal to fb width for cursors (height can be larger though)\n");
+ return -EINVAL;
+ }
+
+ if (asyw->state.src_x || asyw->state.src_y) {
+ NV_ATOMIC(drm, "Cursor planes do not support framebuffer offsets\n");
+ return -EINVAL;
+ }
+
+ if (asyw->image.pitch[0] != asyw->image.w * fb->format->cpp[0]) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image pitch: image must be packed (pitch = %d, width = %d)\n",
+ wndw->plane.name, asyw->image.pitch[0], asyw->image.w);
+ return -EINVAL;
+ }
+
+ ret = head->func->curs_layout(head, asyw, asyh);
+ if (ret) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image size: unsupported size %dx%d\n",
+ wndw->plane.name, asyw->image.w, asyw->image.h);
+ return ret;
+ }
+
+ ret = head->func->curs_format(head, asyw, asyh);
+ if (ret) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image format 0x%X\n",
+ wndw->plane.name, fb->format->format);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const u32
+curs507a_format[] = {
+ DRM_FORMAT_ARGB8888,
+ 0
+};
+
+static const struct nv50_wndw_func
+curs507a_wndw = {
+ .acquire = curs507a_acquire,
+ .release = curs507a_release,
+ .prepare = curs507a_prepare,
+};
+
+int
+curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+ int head, s32 oclass, u32 interlock_data,
+ struct nv50_wndw **pwndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = head,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_wndw *wndw;
+ int ret;
+
+ ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
+ "curs", head, curs507a_format, BIT(head),
+ NV50_DISP_INTERLOCK_CURS, interlock_data, &wndw);
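+ /* Hand the window back to the caller even on error, presumably so its teardown path can clean it up. */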
+ if (*pwndw = wndw, ret)
+ return ret;
+
+ ret = nvif_object_ctor(&disp->disp->object, "kmsCurs", 0, oclass,
+ &args, sizeof(args), &wndw->wimm.base.user);
+ if (ret) {
+ NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+ wndw->immd = func;
+ wndw->ctxdma.parent = NULL;
+ return 0;
+}
+
+int
+curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return curs507a_new_(&curs507a, drm, head, oclass,
+ 0x00000001 << (head * 8), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs907a.c b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
new file mode 100644
index 0000000000..d742362de0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+
+int
+curs907a_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return curs507a_new_(&curs507a, drm, head, oclass,
+ 0x00000001 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
new file mode 100644
index 0000000000..e39d08698c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+#include "atom.h"
+
+#include <nvhw/class/clc37a.h>
+
+static int
+cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0)
+ NVIF_WR32(user, NVC37A, UPDATE, 0x00000001);
+ return ret;
+}
+
+static int
+cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT(0),
+ NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
+ }
+ return ret;
+}
+
+static const struct nv50_wimm_func
+cursc37a = {
+ .point = cursc37a_point,
+ .update = cursc37a_update,
+};
+
+int
+cursc37a_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return curs507a_new_(&cursc37a, drm, head, oclass,
+ 0x00000001 << head, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
new file mode 100644
index 0000000000..09de78d966
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+static int
+dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ u32 sync = 0;
+ int ret;
+
+ if (asyh) {
+ sync |= NVVAL(NV507D, DAC_SET_POLARITY, HSYNC, asyh->or.nhsync);
+ sync |= NVVAL(NV507D, DAC_SET_POLARITY, VSYNC, asyh->or.nvsync);
+ }
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, DAC_SET_CONTROL(or), ctrl,
+ DAC_SET_POLARITY(or), sync);
+ return 0;
+}
+
+const struct nv50_outp_func
+dac507d = {
+ .ctrl = dac507d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
new file mode 100644
index 0000000000..95efa625b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+static int
+dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, DAC_SET_CONTROL(or), ctrl);
+ return 0;
+}
+
+const struct nv50_outp_func
+dac907d = {
+ .ctrl = dac907d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
new file mode 100644
index 0000000000..de8041c94d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -0,0 +1,2790 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "disp.h"
+#include "atom.h"
+#include "core.h"
+#include "head.h"
+#include "wndw.h"
+#include "handles.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/hdmi.h>
+#include <linux/component.h>
+#include <linux/iopoll.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include <nvif/push507c.h>
+
+#include <nvif/class.h>
+#include <nvif/cl0002.h>
+#include <nvif/event.h>
+#include <nvif/if0012.h>
+#include <nvif/if0014.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+#include <nvhw/class/cl887d.h>
+#include <nvhw/class/cl907d.h>
+#include <nvhw/class/cl917d.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_fence.h"
+#include "nv50_display.h"
+
+#include <subdev/bios/dp.h>
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+static int
+nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size,
+ struct nv50_chan *chan)
+{
+ struct nvif_sclass *sclass;
+ int ret, i, n;
+
+ chan->device = device;
+
+ ret = n = nvif_object_sclass_get(disp, &sclass);
+ if (ret < 0)
+ return ret;
+
+ while (oclass[0]) {
+ for (i = 0; i < n; i++) {
+ if (sclass[i].oclass == oclass[0]) {
+ ret = nvif_object_ctor(disp, "kmsChan", 0,
+ oclass[0], data, size,
+ &chan->user);
+ if (ret == 0)
+ nvif_object_map(&chan->user, NULL, 0);
+ nvif_object_sclass_put(&sclass);
+ return ret;
+ }
+ }
+ oclass++;
+ }
+
+ nvif_object_sclass_put(&sclass);
+ return -ENOSYS;
+}
+
+static void
+nv50_chan_destroy(struct nv50_chan *chan)
+{
+ nvif_object_dtor(&chan->user);
+}
+
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
+
+void
+nv50_dmac_destroy(struct nv50_dmac *dmac)
+{
+ nvif_object_dtor(&dmac->vram);
+ nvif_object_dtor(&dmac->sync);
+
+ nv50_chan_destroy(&dmac->base);
+
+ nvif_mem_dtor(&dmac->_push.mem);
+}
+
+static void
+nv50_dmac_kick(struct nvif_push *push)
+{
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+
+ dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ if (dmac->put != dmac->cur) {
+ /* Push buffer fetches are not coherent with BAR1, so we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+
+ NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
+ dmac->put = dmac->cur;
+ }
+
+ push->bgn = push->cur;
+}
+
+static int
+nv50_dmac_free(struct nv50_dmac *dmac)
+{
+ u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+ if (get > dmac->cur) /* NVIDIA stays 5 away from GET; do the same. */
+ return get - dmac->cur - 5;
+ return dmac->max - dmac->cur;
+}
+
+static int
+nv50_dmac_wind(struct nv50_dmac *dmac)
+{
+ /* Wait for GET to depart from the beginning of the push buffer to
+ * prevent writing PUT == GET, which would be ignored by HW.
+ */
+ u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+ if (get == 0) {
+ /* Corner case: HW is idle, but non-committed work is pending. */
+ if (dmac->put == 0)
+ nv50_dmac_kick(dmac->push);
+
+ if (nvif_msec(dmac->base.device, 2000,
+ if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+ }
+
+ PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+ dmac->cur = 0;
+ return 0;
+}
+
+static int
+nv50_dmac_wait(struct nvif_push *push, u32 size)
+{
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ int free;
+
+ if (WARN_ON(size > dmac->max))
+ return -EINVAL;
+
+ dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ if (dmac->cur + size >= dmac->max) {
+ int ret = nv50_dmac_wind(dmac);
+ if (ret)
+ return ret;
+
+ push->cur = dmac->_push.mem.object.map.ptr;
+ push->cur = push->cur + dmac->cur;
+ nv50_dmac_kick(push);
+ }
+
+ if (nvif_msec(dmac->base.device, 2000,
+ if ((free = nv50_dmac_free(dmac)) >= size)
+ break;
+ ) < 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+
+ push->bgn = dmac->_push.mem.object.map.ptr;
+ push->bgn = push->bgn + dmac->cur;
+ push->cur = push->bgn;
+ push->end = push->cur + free;
+ return 0;
+}
+
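
The GET/PUT bookkeeping in nv50_dmac_kick()/nv50_dmac_free()/nv50_dmac_wind() is easier to see with the ring arithmetic pulled out on its own. The sketch below is not part of the patch; it is a standalone userspace model that only mirrors the free-space rule from nv50_dmac_free() (including the 5-dword margin kept between PUT and GET), and the example offsets are illustrative.

/* Userspace model of the EVO push-buffer free-space rule used by
 * nv50_dmac_free() above. All offsets are in dwords.
 */
#include <stdio.h>

#define RING_MAX (0x1000 / 4 - 1)	/* push-buffer sizing from nv50_dmac_create() */

/* Dwords that may be written at 'cur', given the HW read pointer 'get'. */
static int ring_free(unsigned int get, unsigned int cur)
{
	if (get > cur)			/* writer is behind the reader ...            */
		return get - cur - 5;	/* ... so stay 5 dwords away from GET         */
	return RING_MAX - cur;		/* otherwise space runs to the end, then wrap */
}

int main(void)
{
	/* Writer ahead of the reader: only the tail of the ring is free. */
	printf("get=0x080 cur=0x300 -> free=%d\n", ring_free(0x080, 0x300));
	/* Writer behind the reader: free space runs up to (almost) GET.  */
	printf("get=0x300 cur=0x080 -> free=%d\n", ring_free(0x300, 0x080));
	return 0;
}
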
+MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
+static int nv50_dmac_vram_pushbuf = -1;
+module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
+
+int
+nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
+ struct nv50_dmac *dmac)
+{
+ struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_disp_chan_v0 *args = data;
+ u8 type = NVIF_MEM_COHERENT;
+ int ret;
+
+ mutex_init(&dmac->lock);
+
+ /* Pascal added support for 47-bit physical addresses, but some
+ * parts of EVO still only accept 40-bit PAs.
+ *
+ * To avoid issues on systems with large amounts of RAM, and on
+ * systems where an IOMMU maps pages at a high address, we need
+ * to allocate push buffers in VRAM instead.
+ *
+ * This appears to match NVIDIA's behaviour on Pascal.
+ */
+ if ((nv50_dmac_vram_pushbuf > 0) ||
+ (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
+ type |= NVIF_MEM_VRAM;
+
+ ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
+ &dmac->_push.mem);
+ if (ret)
+ return ret;
+
+ dmac->ptr = dmac->_push.mem.object.map.ptr;
+ dmac->_push.wait = nv50_dmac_wait;
+ dmac->_push.kick = nv50_dmac_kick;
+ dmac->push = &dmac->_push;
+ dmac->push->bgn = dmac->_push.mem.object.map.ptr;
+ dmac->push->cur = dmac->push->bgn;
+ dmac->push->end = dmac->push->bgn;
+ dmac->max = 0x1000/4 - 1;
+
+ /* EVO channels are affected by a HW bug where the last 12 DWORDs
+ * of the push buffer can't be used safely.
+ */
+ if (disp->oclass < GV100_DISP)
+ dmac->max -= 12;
+
+ args->pushbuf = nvif_handle(&dmac->_push.mem.object);
+
+ ret = nv50_chan_create(device, disp, oclass, head, data, size,
+ &dmac->base);
+ if (ret)
+ return ret;
+
+ if (syncbuf < 0)
+ return 0;
+
+ ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
+ .start = syncbuf + 0x0000,
+ .limit = syncbuf + 0x0fff,
+ }, sizeof(struct nv_dma_v0),
+ &dmac->sync);
+ if (ret)
+ return ret;
+
+ ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
+ .start = 0,
+ .limit = device->info.ram_user - 1,
+ }, sizeof(struct nv_dma_v0),
+ &dmac->vram);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+/******************************************************************************
+ * Output path helpers
+ *****************************************************************************/
+static void
+nv50_outp_dump_caps(struct nouveau_drm *drm,
+ struct nouveau_encoder *outp)
+{
+ NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
+ outp->base.base.name, outp->caps.dp_interlace);
+}
+
+static int
+nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *native_mode)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+ NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
+ asyc->scaler.full = false;
+ if (!native_mode)
+ return 0;
+
+ if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* Don't force the scaler for EDID modes with the
+ * same size as the native one (e.g. a different
+ * refresh rate).
+ */
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay &&
+ mode->type & DRM_MODE_TYPE_DRIVER)
+ break;
+ mode = native_mode;
+ asyc->scaler.full = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ mode = native_mode;
+ }
+
+ if (!drm_mode_equal(adjusted_mode, mode)) {
+ drm_mode_copy(adjusted_mode, mode);
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static void
+nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
+{
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ unsigned int max_rate, mode_rate;
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_DP:
+ max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
+
+ /* we don't support more than 10 anyway */
+ asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
+
+ /* reduce the bpc until it works out */
+ while (asyh->or.bpc > 6) {
+ mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
+ if (mode_rate <= max_rate)
+ break;
+
+ asyh->or.bpc -= 2;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
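
The bpc-reduction loop in nv50_outp_atomic_fix_depth() is plain bandwidth arithmetic. The standalone sketch below is not part of the patch; it assumes (the patch does not state it) that the pixel clock is in kHz and that link_bw is the per-lane symbol rate in kHz carrying one payload byte per symbol, so both sides of the comparison come out in kB/s. The example values in main() are illustrative.

/* Standalone model of the bpc-reduction loop in nv50_outp_atomic_fix_depth(). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int fix_depth(unsigned int clock, unsigned int link_nr,
			      unsigned int link_bw, unsigned int bpc)
{
	unsigned int max_rate = link_nr * link_bw;

	if (bpc > 10)		/* the hardware tops out at 10 bpc */
		bpc = 10;

	while (bpc > 6) {	/* 3 components of 'bpc' bits per pixel */
		unsigned int mode_rate = DIV_ROUND_UP(clock * bpc * 3, 8);

		if (mode_rate <= max_rate)
			break;
		bpc -= 2;	/* doesn't fit, try the next lower depth */
	}
	return bpc;
}

int main(void)
{
	/* Illustrative values: a ~594 MHz pixel clock over 4 lanes at an
	 * assumed 540000 kB/s per lane; 10 bpc doesn't fit, 8 bpc does.
	 */
	printf("bpc = %u\n", fix_depth(594000, 4, 540000, 10));
	return 0;
}
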
+static int
+nv50_outp_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
+ if (ret)
+ return ret;
+
+ if (crtc_state->mode_changed || crtc_state->connectors_changed)
+ asyh->or.bpc = connector->display_info.bpc;
+
+ /* We might have to reduce the bpc */
+ nv50_outp_atomic_fix_depth(encoder, crtc_state);
+
+ return 0;
+}
+
+struct nouveau_connector *
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_encoder *encoder = to_drm_encoder(outp);
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->best_encoder == encoder)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
+struct nouveau_connector *
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_encoder *encoder = to_drm_encoder(outp);
+ int i;
+
+ for_each_old_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->best_encoder == encoder)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
+static struct nouveau_crtc *
+nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const u32 mask = drm_encoder_mask(&outp->base.base);
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->encoder_mask & mask)
+ return nouveau_crtc(crtc);
+ }
+
+ return NULL;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
+
+ core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
+ nv_encoder->crtc = NULL;
+ nvif_outp_release(&nv_encoder->outp);
+}
+
+static void
+nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
+ struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ u32 ctrl = 0;
+
+ switch (nv_crtc->index) {
+ case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
+ case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
+ case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
+ case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
+
+ nvif_outp_acquire_rgb_crt(&nv_encoder->outp);
+
+ core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
+ asyh->or.depth = 0;
+
+ nv_encoder->crtc = &nv_crtc->base;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ u32 loadval;
+ int ret;
+
+ loadval = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (loadval == 0)
+ loadval = 340;
+
+ ret = nvif_outp_load_detect(&nv_encoder->outp, loadval);
+ if (ret <= 0)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_dac_help = {
+ .atomic_check = nv50_outp_atomic_check,
+ .atomic_enable = nv50_dac_atomic_enable,
+ .atomic_disable = nv50_dac_atomic_disable,
+ .detect = nv50_dac_detect
+};
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ nvif_outp_dtor(&nv_encoder->outp);
+
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs
+nv50_dac_func = {
+ .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nv50_disp *disp = nv50_disp(connector->dev);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c_bus *bus;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ int type = DRM_MODE_ENCODER_DAC;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+
+ bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+ if (bus)
+ nv_encoder->i2c = &bus->i2c;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
+ "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_dac_help);
+
+ drm_connector_attach_encoder(connector, encoder);
+ return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+}
+
+/*
+ * audio component binding for ELD notification
+ */
+static void
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+ int dev_id)
+{
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ port, dev_id);
+}
+
+static int
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
+ bool *enabled, unsigned char *buf, int max_bytes)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_crtc *nv_crtc;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&drm->audio.lock);
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ struct nouveau_connector *nv_connector = NULL;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue; /* TODO */
+
+ nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_connector(nv_encoder->audio.connector);
+ nv_crtc = nouveau_crtc(nv_encoder->crtc);
+
+ if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
+ continue;
+
+ *enabled = nv_encoder->audio.enabled;
+ if (*enabled) {
+ ret = drm_eld_size(nv_connector->base.eld);
+ memcpy(buf, nv_connector->base.eld,
+ min(max_bytes, ret));
+ }
+ break;
+ }
+
+ mutex_unlock(&drm->audio.lock);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops nv50_audio_component_ops = {
+ .get_eld = nv50_audio_component_get_eld,
+};
+
+static int
+nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ drm_modeset_lock_all(drm_dev);
+ acomp->ops = &nv50_audio_component_ops;
+ acomp->dev = kdev;
+ drm->audio.component = acomp;
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static void
+nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ drm_modeset_lock_all(drm_dev);
+ drm->audio.component = NULL;
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static const struct component_ops nv50_audio_component_bind_ops = {
+ .bind = nv50_audio_component_bind,
+ .unbind = nv50_audio_component_unbind,
+};
+
+static void
+nv50_audio_component_init(struct nouveau_drm *drm)
+{
+ if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ return;
+
+ drm->audio.component_registered = true;
+ mutex_init(&drm->audio.lock);
+}
+
+static void
+nv50_audio_component_fini(struct nouveau_drm *drm)
+{
+ if (!drm->audio.component_registered)
+ return;
+
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ mutex_destroy(&drm->audio.lock);
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static bool
+nv50_audio_supported(struct drm_encoder *encoder)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ if (disp->disp->object.oclass <= GT200_DISP ||
+ disp->disp->object.oclass == GT206_DISP)
+ return false;
+
+ return true;
+}
+
+static void
+nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nvif_outp *outp = &nv_encoder->outp;
+
+ if (!nv50_audio_supported(encoder))
+ return;
+
+ mutex_lock(&drm->audio.lock);
+ if (nv_encoder->audio.enabled) {
+ nv_encoder->audio.enabled = false;
+ nv_encoder->audio.connector = NULL;
+ nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
+ }
+ mutex_unlock(&drm->audio.lock);
+
+ nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
+}
+
+static void
+nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nvif_outp *outp = &nv_encoder->outp;
+
+ if (!nv50_audio_supported(encoder) || !drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ mutex_lock(&drm->audio.lock);
+
+ nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
+ drm_eld_size(nv_connector->base.eld));
+ nv_encoder->audio.enabled = true;
+ nv_encoder->audio.connector = &nv_connector->base;
+
+ mutex_unlock(&drm->audio.lock);
+
+ nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
+ struct drm_display_mode *mode, bool hda)
+{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
+ union hdmi_infoframe infoframe = { 0 };
+ const u8 rekey = 56; /* constant used by both the binary driver and tegra */
+ u8 scdc = 0;
+ u32 max_ac_packet;
+ struct {
+ struct nvif_outp_infoframe_v0 infoframe;
+ u8 data[17];
+ } args = { 0 };
+ int ret, size;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ if (hdmi->scdc.scrambling.supported) {
+ const bool high_tmds_clock_ratio = mode->clock > 340000;
+
+ ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
+ if (ret < 0) {
+ NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
+ return;
+ }
+
+ scdc &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
+ if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates)
+ scdc |= SCDC_SCRAMBLING_ENABLE;
+ if (high_tmds_clock_ratio)
+ scdc |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
+
+ ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, scdc);
+ if (ret < 0)
+ NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
+ scdc, ret);
+ }
+
+ ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true,
+ max_ac_packet, rekey, scdc, hda);
+ if (ret)
+ return;
+
+ /* AVI InfoFrame. */
+ args.infoframe.version = 0;
+ args.infoframe.head = nv_crtc->index;
+
+ if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
+ drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
+ HDMI_QUANTIZATION_RANGE_FULL);
+
+ size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ } else {
+ size = 0;
+ }
+
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+
+ /* Vendor InfoFrame. */
+ memset(&args.data, 0, sizeof(args.data));
+ if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
+ &nv_connector->base, mode))
+ size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ else
+ size = 0;
+
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
+}
+
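
The max_ac_packet value computed at the top of nv50_hdmi_enable() is the number of 32-pixel audio/control packet slots that fit in the horizontal blanking once the rekey period and a fixed 18-pixel overhead are subtracted. The standalone sketch below is not part of the patch and uses an illustrative 1080p60 timing.

/* Standalone model of the max_ac_packet computation in nv50_hdmi_enable(). */
#include <stdio.h>

static unsigned int max_ac_packet(unsigned int htotal, unsigned int hdisplay)
{
	const unsigned int rekey = 56;	/* same constant as above */
	unsigned int hblank = htotal - hdisplay;

	return (hblank - rekey - 18) / 32;
}

int main(void)
{
	/* 1920x1080@60 CEA timing: htotal 2200, hdisplay 1920 -> hblank 280 */
	printf("max_ac_packet = %u\n", max_ac_packet(2200, 1920));
	return 0;
}
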
+/******************************************************************************
+ * MST
+ *****************************************************************************/
+#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
+#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
+#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
+
+struct nv50_mstc {
+ struct nv50_mstm *mstm;
+ struct drm_dp_mst_port *port;
+ struct drm_connector connector;
+
+ struct drm_display_mode *native;
+ struct edid *edid;
+};
+
+struct nv50_msto {
+ struct drm_encoder encoder;
+
+ /* head is statically assigned on msto creation */
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+ bool enabled;
+};
+
+struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto;
+
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
+ return nouveau_encoder(encoder);
+
+ msto = nv50_msto(encoder);
+ if (!msto->mstc)
+ return NULL;
+ return msto->mstc->mstm->outp;
+}
+
+static void
+nv50_msto_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct drm_dp_mst_atomic_payload *payload =
+ drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->disabled = false;
+ } else if (msto->enabled) {
+ drm_dp_add_payload_part2(mgr, state, payload);
+ msto->enabled = false;
+ }
+}
+
+static void
+nv50_msto_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+ struct drm_dp_mst_topology_state *old_mst_state;
+ struct drm_dp_mst_atomic_payload *payload, *old_payload;
+
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+
+ old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
+
+ payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+ old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
+
+ // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+ if (msto->disabled) {
+ drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
+
+ nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
+ } else {
+ if (msto->enabled)
+ drm_dp_add_payload_part1(mgr, mst_state, payload);
+
+ nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
+ payload->vc_start_slot, payload->time_slots,
+ payload->pbn, payload->time_slots * mst_state->pbn_div);
+ }
+}
+
+static int
+nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int slots;
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+ if (ret)
+ return ret;
+
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ /*
+ * When restoring duplicated states, we need to make sure that the bw
+ * remains the same and avoid recalculating it, as the connector's bpc
+ * may have changed after the state was duplicated
+ */
+ if (!state->duplicated) {
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->or.bpc = connector->display_info.bpc;
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4);
+ }
+
+ mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ if (!mst_state->pbn_div) {
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
+ outp->dp.link_bw, outp->dp.link_nr);
+ }
+
+ slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
+ if (slots < 0)
+ return slots;
+
+ asyh->dp.tu = slots;
+
+ return 0;
+}
+
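
The PBN and time-slot numbers in nv50_msto_atomic_check() come from the DRM MST helpers. The sketch below is not part of the patch and only roughly approximates what drm_dp_calc_pbn_mode() and drm_dp_atomic_find_time_slots() compute: PBN scales the stream bandwidth into 54/64-MB/s units with a 0.6% margin, and the slot count is that PBN divided by the per-slot bandwidth (pbn_div), rounded up. The pbn_div value in main() is made up for illustration; in the driver it comes from drm_dp_get_vc_payload_bw().

/* Rough userspace approximation of the MST bandwidth bookkeeping above.
 * 'clock_khz' is the pixel clock in kHz, 'bpp' is bits per pixel.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int calc_pbn(unsigned long long clock_khz, unsigned int bpp)
{
	/* bytes/s scaled by 64/54 (PBN unit) and 1006/1000 (margin) */
	return DIV_ROUND_UP(clock_khz * bpp / 8 * 64 * 1006,
			    54ULL * 1000 * 1000);
}

int main(void)
{
	unsigned int pbn = calc_pbn(594000, 8 * 3);	/* example 8 bpc stream */
	unsigned int pbn_div = 40;			/* made-up per-slot PBN */

	printf("pbn=%u time slots=%u\n", pbn, DIV_ROUND_UP(pbn, pbn_div));
	return 0;
}
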
+static u8
+nv50_dp_bpc_to_depth(unsigned int bpc)
+{
+ switch (bpc) {
+ case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
+ case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
+ case 10:
+ default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
+ }
+}
+
+static void
+nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_head *head = msto->head;
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
+ struct nv50_mstc *mstc = NULL;
+ struct nv50_mstm *mstm = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 proto;
+
+ drm_connector_list_iter_begin(encoder->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->state->best_encoder == &msto->encoder) {
+ mstc = nv50_mstc(connector);
+ mstm = mstc->mstm;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (WARN_ON(!mstc))
+ return;
+
+ if (!mstm->links++) {
+ /*XXX: MST audio. */
+ nvif_outp_acquire_dp(&mstm->outp->outp, mstm->outp->dp.dpcd, 0, 0, false, true);
+ }
+
+ if (mstm->outp->outp.or.link & 1)
+ proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
+ else
+ proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
+
+ mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
+ nv50_dp_bpc_to_depth(asyh->or.bpc));
+
+ msto->mstc = mstc;
+ msto->enabled = true;
+ mstm->modified = true;
+}
+
+static void
+nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ if (!--mstm->links)
+ mstm->disabled = true;
+ msto->disabled = true;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_msto_help = {
+ .atomic_disable = nv50_msto_atomic_disable,
+ .atomic_enable = nv50_msto_atomic_enable,
+ .atomic_check = nv50_msto_atomic_check,
+};
+
+static void
+nv50_msto_destroy(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ drm_encoder_cleanup(&msto->encoder);
+ kfree(msto);
+}
+
+static const struct drm_encoder_funcs
+nv50_msto = {
+ .destroy = nv50_msto_destroy,
+};
+
+static struct nv50_msto *
+nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
+{
+ struct nv50_msto *msto;
+ int ret;
+
+ msto = kzalloc(sizeof(*msto), GFP_KERNEL);
+ if (!msto)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
+ DRM_MODE_ENCODER_DPMST, "mst-%d", id);
+ if (ret) {
+ kfree(msto);
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
+ msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
+ msto->head = head;
+ return msto;
+}
+
+static struct drm_encoder *
+nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_crtc *crtc = connector_state->crtc;
+
+ if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ return NULL;
+
+ return &nv50_head(crtc)->msto->encoder;
+}
+
+static enum drm_mode_status
+nv50_mstc_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ /* TODO: calculate the PBN from the dotclock and validate against the
+ * MSTB's max possible PBN
+ */
+
+ return nv50_dp_mode_valid(outp, mode, NULL);
+}
+
+static int
+nv50_mstc_get_modes(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret = 0;
+
+ mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
+ drm_connector_update_edid_property(&mstc->connector, mstc->edid);
+ if (mstc->edid)
+ ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
+
+ /*
+ * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+ * to 8 to save bandwidth on the topology. In the future, we'll want
+ * to properly fix this by dynamically selecting the highest possible
+ * bpc that would fit in the topology
+ */
+ if (connector->display_info.bpc)
+ connector->display_info.bpc =
+ clamp(connector->display_info.bpc, 6U, 8U);
+ else
+ connector->display_info.bpc = 8;
+
+ if (mstc->native)
+ drm_mode_destroy(mstc->connector.dev, mstc->native);
+ mstc->native = nouveau_conn_native_mode(&mstc->connector);
+ return ret;
+}
+
+static int
+nv50_mstc_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
+
+ return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
+}
+
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
+ }
+
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
+ if (ret != connector_status_connected)
+ goto out;
+
+out:
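+ /* Nothing extra to do on success; both paths just drop the runtime-PM
+ * reference taken above and return the detect status.
+ */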
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
+static void
+nv50_mstc_destroy(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+
+ drm_connector_cleanup(&mstc->connector);
+ drm_dp_mst_put_port_malloc(mstc->port);
+
+ kfree(mstc);
+}
+
+static const struct drm_connector_funcs
+nv50_mstc = {
+ .reset = nouveau_conn_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = nv50_mstc_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
+};
+
+static int
+nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ const char *path, struct nv50_mstc **pmstc)
+{
+ struct drm_device *dev = mstm->outp->base.base.dev;
+ struct drm_crtc *crtc;
+ struct nv50_mstc *mstc;
+ int ret;
+
+ if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
+ return -ENOMEM;
+ mstc->mstm = mstm;
+ mstc->port = port;
+
+ ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ kfree(*pmstc);
+ *pmstc = NULL;
+ return ret;
+ }
+
+ drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
+
+ mstc->connector.funcs->reset(&mstc->connector);
+ nouveau_conn_attach_properties(&mstc->connector);
+
+ drm_for_each_crtc(crtc, dev) {
+ if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ continue;
+
+ drm_connector_attach_encoder(&mstc->connector,
+ &nv50_head(crtc)->msto->encoder);
+ }
+
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
+ drm_connector_set_path_property(&mstc->connector, path);
+ drm_dp_mst_get_port_malloc(port);
+ return 0;
+}
+
+static void
+nv50_mstm_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ drm_dp_check_act_status(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
+ }
+ }
+
+ mstm->modified = false;
+}
+
+static void
+nv50_mstm_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
+
+ /* Disable payloads first */
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm && msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
+ }
+ }
+
+ /* Add payloads for new heads, while also updating the start slots of any unmodified (but
+ * active) heads that may have had their VC slots shifted left after the previous step
+ */
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm && !msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
+ }
+ }
+
+ if (mstm->disabled) {
+ if (!mstm->links)
+ nvif_outp_release(&mstm->outp->outp);
+ mstm->disabled = false;
+ }
+}
+
+static struct drm_connector *
+nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *path)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ struct nv50_mstc *mstc;
+ int ret;
+
+ ret = nv50_mstc_new(mstm, port, path, &mstc);
+ if (ret)
+ return NULL;
+
+ return &mstc->connector;
+}
+
+static const struct drm_dp_mst_topology_cbs
+nv50_mstm = {
+ .add_connector = nv50_mstm_add_connector,
+};
+
+bool
+nv50_mstm_service(struct nouveau_drm *drm,
+ struct nouveau_connector *nv_connector,
+ struct nv50_mstm *mstm)
+{
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ bool handled = true, ret = true;
+ int rc;
+ u8 esi[8] = {};
+
+ while (handled) {
+ u8 ack[8] = {};
+
+ rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (rc != 8) {
+ ret = false;
+ break;
+ }
+
+ drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
+ if (!handled)
+ break;
+
+ rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
+
+ if (rc != 1) {
+ ret = false;
+ break;
+ }
+
+ drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
+ }
+
+ if (!ret)
+ NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
+ nv_connector->base.name, rc);
+
+ return ret;
+}
+
+void
+nv50_mstm_remove(struct nv50_mstm *mstm)
+{
+ mstm->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+}
+
+int
+nv50_mstm_detect(struct nouveau_encoder *outp)
+{
+ struct nv50_mstm *mstm = outp->dp.mstm;
+ struct drm_dp_aux *aux;
+ int ret;
+
+ if (!mstm || !mstm->can_mst)
+ return 0;
+
+ aux = mstm->mgr.aux;
+
+ /* Clear any leftover MST state we didn't set ourselves by first
+ * disabling MST if it was already enabled
+ */
+ ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* And start enabling */
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
+ if (ret)
+ return ret;
+
+ mstm->is_mst = true;
+ return 1;
+}
+
+static void
+nv50_mstm_fini(struct nouveau_encoder *outp)
+{
+ struct nv50_mstm *mstm = outp->dp.mstm;
+
+ if (!mstm)
+ return;
+
+ /* Don't change the MST state of this connector until we've finished
+ * resuming, since we can't safely grab hpd_irq_lock in our resume
+ * path to protect mstm->is_mst without potentially deadlocking
+ */
+ mutex_lock(&outp->dp.hpd_irq_lock);
+ mstm->suspended = true;
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+
+ if (mstm->is_mst)
+ drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
+}
+
+static void
+nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
+{
+ struct nv50_mstm *mstm = outp->dp.mstm;
+ int ret = 0;
+
+ if (!mstm)
+ return;
+
+ if (mstm->is_mst) {
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
+ if (ret == -1)
+ nv50_mstm_remove(mstm);
+ }
+
+ mutex_lock(&outp->dp.hpd_irq_lock);
+ mstm->suspended = false;
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+
+ if (ret == -1)
+ drm_kms_helper_hotplug_event(mstm->mgr.dev);
+}
+
+static void
+nv50_mstm_del(struct nv50_mstm **pmstm)
+{
+ struct nv50_mstm *mstm = *pmstm;
+ if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
+ kfree(*pmstm);
+ *pmstm = NULL;
+ }
+}
+
+static int
+nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ int conn_base_id, struct nv50_mstm **pmstm)
+{
+ const int max_payloads = hweight8(outp->dcb->heads);
+ struct drm_device *dev = outp->base.base.dev;
+ struct nv50_mstm *mstm;
+ int ret;
+
+ if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
+ return -ENOMEM;
+ mstm->outp = outp;
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
+ max_payloads, conn_base_id);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
+ struct nv50_head_atom *asyh, u8 proto, u8 depth)
+{
+ struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
+ struct nv50_core *core = disp->core;
+
+ if (!asyh) {
+ nv_encoder->ctrl &= ~BIT(head);
+ if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
+ nv_encoder->ctrl = 0;
+ } else {
+ nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
+ nv_encoder->ctrl |= BIT(head);
+ asyh->or.depth = depth;
+ }
+
+ core->func->sor->ctrl(core, nv_encoder->outp.or.id, nv_encoder->ctrl, asyh);
+}
+
+/* TODO: Should we extend this to PWM-only backlights?
+ * As well, should we add a DRM helper for waiting for the backlight to acknowledge
+ * the panel backlight has been shut off? Intel doesn't seem to do this, and uses a
+ * fixed time delay from the vbios…
+ */
+static void
+nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+ struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+ struct nouveau_backlight *backlight = nv_connector->backlight;
+#endif
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ int ret;
+ u8 pwr;
+
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+ if (backlight && backlight->uses_dpcd) {
+ ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
+ if (ret < 0)
+ NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
+ nv_connector->base.base.id, nv_connector->base.name, ret);
+ }
+#endif
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
+ }
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nvif_outp_release(&nv_encoder->outp);
+ nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nvif_outp *outp = &nv_encoder->outp;
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_connector *nv_connector;
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+ struct nouveau_backlight *backlight;
+#endif
+ struct nvbios *bios = &drm->vbios;
+ bool lvds_dual = false, lvds_8bpc = false, hda = false;
+ u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
+ u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
+
+ nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
+ nv_encoder->crtc = &nv_crtc->base;
+
+ if ((disp->disp->object.oclass == GT214_DISP ||
+ disp->disp->object.oclass >= GF110_DISP) &&
+ drm_detect_monitor_audio(nv_connector->edid))
+ hda = true;
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ if (disp->disp->object.oclass == NV50_DISP ||
+ !drm_detect_hdmi_monitor(nv_connector->edid))
+ nvif_outp_acquire_tmds(outp, nv_crtc->index, false, 0, 0, 0, false);
+ else
+ nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
+
+ if (nv_encoder->outp.or.link & 1) {
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
+ /* Only enable dual-link if:
+ * - It's needed (i.e. rate > 165 MHz)
+ * - DCB says we can
+ * - Not an HDMI monitor, since there's no dual-link
+ * on HDMI.
+ */
+ if (mode->clock >= 165000 &&
+ nv_encoder->dcb->duallink_possible &&
+ !drm_detect_hdmi_monitor(nv_connector->edid))
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
+ } else {
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
+ }
+ break;
+ case DCB_OUTPUT_LVDS:
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
+
+ if (bios->fp_no_ddc) {
+ lvds_dual = bios->fp.dual_link;
+ lvds_8bpc = bios->fp.if_is_24bit;
+ } else {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ lvds_dual = true;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ lvds_dual = true;
+ }
+
+ if (lvds_dual) {
+ if (bios->fp.strapless_is_24bit & 2)
+ lvds_8bpc = true;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ lvds_8bpc = true;
+ }
+
+ if (asyh->or.bpc == 8)
+ lvds_8bpc = true;
+ }
+
+ nvif_outp_acquire_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
+ break;
+ case DCB_OUTPUT_DP:
+ nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, hda, false);
+ depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
+
+ if (nv_encoder->outp.or.link & 1)
+ proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
+ else
+ proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
+
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
+
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+ backlight = nv_connector->backlight;
+ if (backlight && backlight->uses_dpcd)
+ drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
+ (u16)backlight->dev->props.brightness);
+#endif
+
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_sor_help = {
+ .atomic_check = nv50_outp_atomic_check,
+ .atomic_enable = nv50_sor_atomic_enable,
+ .atomic_disable = nv50_sor_atomic_disable,
+};
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ nvif_outp_dtor(&nv_encoder->outp);
+
+ nv50_mstm_del(&nv_encoder->dp.mstm);
+ drm_encoder_cleanup(encoder);
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
+
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs
+nv50_sor_func = {
+ .destroy = nv50_sor_destroy,
+};
+
+bool nv50_has_mst(struct nouveau_drm *drm)
+{
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ u32 data;
+ u8 ver, hdr, cnt, len;
+
+ data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+ return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+}
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct nv50_disp *disp = nv50_disp(connector->dev);
+ int type, ret;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ default:
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->update = nv50_sor_update;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
+ "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_sor_help);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
+
+ if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nvkm_i2c_aux *aux =
+ nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
+ mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
+ if (aux) {
+ if (disp->disp->object.oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
+ nv_encoder->aux = aux;
+ }
+
+ if (nv_connector->type != DCB_CONNECTOR_eDP &&
+ nv50_has_mst(drm)) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
+ 16, nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+ if (ret)
+ return ret;
+ }
+ } else {
+ struct nvkm_i2c_bus *bus =
+ nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+ if (bus)
+ nv_encoder->i2c = &bus->i2c;
+ }
+
+ return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+}
+
+/******************************************************************************
+ * PIOR
+ *****************************************************************************/
+static int
+nv50_pior_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ crtc_state->adjusted_mode.clock *= 2;
+ return 0;
+}
+
+static void
+nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
+
+ core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
+ nv_encoder->crtc = NULL;
+ nvif_outp_release(&nv_encoder->outp);
+}
+
+static void
+nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
+ struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ u32 ctrl = 0;
+
+ switch (nv_crtc->index) {
+ case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
+ case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ switch (asyh->or.bpc) {
+ case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
+ case 8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
+ case 6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
+ default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
+ }
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
+ nvif_outp_acquire_tmds(&nv_encoder->outp, false, false, 0, 0, 0, false);
+ break;
+ case DCB_OUTPUT_DP:
+ ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
+ nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, false, false);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
+ nv_encoder->crtc = &nv_crtc->base;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_pior_help = {
+ .atomic_check = nv50_pior_atomic_check,
+ .atomic_enable = nv50_pior_atomic_enable,
+ .atomic_disable = nv50_pior_atomic_disable,
+};
+
+static void
+nv50_pior_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ nvif_outp_dtor(&nv_encoder->outp);
+
+ drm_encoder_cleanup(encoder);
+
+ mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs
+nv50_pior_func = {
+ .destroy = nv50_pior_destroy,
+};
+
+static int
+nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c_bus *bus = NULL;
+ struct nvkm_i2c_aux *aux = NULL;
+ struct i2c_adapter *ddc;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+ ddc = bus ? &bus->i2c : NULL;
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case DCB_OUTPUT_DP:
+ aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+ ddc = aux ? &aux->i2c : NULL;
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->i2c = ddc;
+ nv_encoder->aux = aux;
+
+ mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
+ "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_pior_help);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
+
+ return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+}
+
+/******************************************************************************
+ * Atomic
+ *****************************************************************************/
+
+static void
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_core *core = disp->core;
+ struct nv50_mstm *mstm;
+ int i;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
+
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_prepare(state, mst_state, mstm);
+ }
+
+ core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
+ core->func->update(core, interlock, true);
+ if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
+ disp->core->chan.base.device))
+ NV_ERROR(drm, "core notifier timeout\n");
+
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_cleanup(state, mst_state, mstm);
+ }
+}
+
+static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i;
+
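+ /* Kick every window channel that is referenced by the accumulated
+ * interlock masks.
+ */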
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+ if (wndw->func->update)
+ wndw->func->update(wndw, interlock);
+ }
+ }
+}
+
+static void
+nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc_state *new_crtc_state, *old_crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_core *core = disp->core;
+ struct nv50_outp_atom *outp, *outt;
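+ /* Interlock masks accumulated while emitting this commit, indexed by
+ * channel type; a non-zero CORE entry means a core channel flush is
+ * still outstanding.
+ */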
+ u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
+ int i;
+ bool flushed = false;
+
+ NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
+ nv50_crc_atomic_stop_reporting(state);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_dependencies(state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
+
+ if (atom->lock_core)
+ mutex_lock(&disp->mutex);
+
+ /* Disable head(s). */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
+ asyh->clr.mask, asyh->set.mask);
+
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ pm_runtime_put_noidle(dev->dev);
+ drm_crtc_vblank_off(crtc);
+ }
+
+ if (asyh->clr.mask) {
+ nv50_head_flush_clr(head, asyh, atom->flush_disable);
+ interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
+ }
+ }
+
+ /* Disable plane(s). */
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
+ asyw->clr.mask, asyw->set.mask);
+ if (!asyw->clr.mask)
+ continue;
+
+ nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
+ }
+
+ /* Disable output path(s). */
+ list_for_each_entry(outp, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
+ outp->clr.mask, outp->set.mask);
+
+ if (outp->clr.mask) {
+ help->atomic_disable(encoder, state);
+ interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
+ if (outp->flush_disable) {
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
+ memset(interlock, 0x00, sizeof(interlock));
+
+ flushed = true;
+ }
+ }
+ }
+
+ /* Flush disable. */
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
+ if (atom->flush_disable) {
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
+ memset(interlock, 0x00, sizeof(interlock));
+
+ flushed = true;
+ }
+ }
+
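+ /* Release the old CRC notifier contexts now if the disable was already
+ * flushed above; otherwise this is deferred until after the final flush
+ * at the end of this function.
+ */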
+ if (flushed)
+ nv50_crc_atomic_release_notifier_contexts(state);
+ nv50_crc_atomic_init_notifier_contexts(state);
+
+ /* Update output path(s). */
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
+ outp->set.mask, outp->clr.mask);
+
+ if (outp->set.mask) {
+ help->atomic_enable(encoder, state);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ /* Update head(s). */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set(head, asyh);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+
+ if (new_crtc_state->active) {
+ if (!old_crtc_state->active) {
+ drm_crtc_vblank_on(crtc);
+ pm_runtime_get_noresume(dev->dev);
+ }
+ if (new_crtc_state->event)
+ drm_crtc_vblank_get(crtc);
+ }
+ }
+
+ /* Update window->head assignment.
+ *
+ * This has to happen in an update that's not interlocked with
+ * any window channels to avoid hitting HW error checks.
+ *
+ * TODO: Proper handling of window ownership (Turing apparently
+ * supports non-fixed mappings).
+ */
+ if (core->assign_windows) {
+ core->func->wndw.owner(core);
+ nv50_disp_atomic_commit_core(state, interlock);
+ core->assign_windows = false;
+ interlock[NV50_DISP_INTERLOCK_CORE] = 0;
+ }
+
+ /* Finish updating head(s)...
+ *
+ * NVD is rather picky about both where window assignments can change,
+ * *and* about certain core and window channel states matching.
+ *
+ * The EFI GOP driver on newer GPUs configures window channels with a
+ * different output format than we do, and the core channel update
+ * in the assign_windows case above would result in a state mismatch.
+ *
+ * Delay some of the head update until after that point to work around
+ * the issue. This only affects the initial modeset.
+ *
+ * TODO: handle this better when adding flexible window mapping
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set_wndw(head, asyh);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+ }
+
+ /* Update plane(s). */
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
+ asyw->set.mask, asyw->clr.mask);
+ if ( !asyw->set.mask &&
+ (!asyw->clr.mask || atom->flush_disable))
+ continue;
+
+ nv50_wndw_flush_set(wndw, interlock, asyw);
+ }
+
+ /* Flush update. */
+ nv50_disp_atomic_commit_wndw(state, interlock);
+
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
+ if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+ interlock[NV50_DISP_INTERLOCK_OVLY] ||
+ interlock[NV50_DISP_INTERLOCK_WNDW] ||
+ !atom->state.legacy_cursor_update)
+ nv50_disp_atomic_commit_core(state, interlock);
+ else
+ disp->core->func->update(disp->core, interlock, false);
+ }
+
+ if (atom->lock_core)
+ mutex_unlock(&disp->mutex);
+
+ /* Wait for HW to signal completion. */
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ int ret = nv50_wndw_wait_armed(wndw, asyw);
+ if (ret)
+ NV_ERROR(drm, "%s: timeout\n", plane->name);
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->event) {
+ unsigned long flags;
+ /* Get correct count/ts if racing with vblank irq */
+ if (new_crtc_state->active)
+ drm_crtc_accurate_vblank_count(crtc);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ new_crtc_state->event = NULL;
+ if (new_crtc_state->active)
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+
+ nv50_crc_atomic_start_reporting(state);
+ if (!flushed)
+ nv50_crc_atomic_release_notifier_contexts(state);
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+
+ /* Drop the RPM ref we got from nv50_disp_atomic_commit() */
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+}
+
+static void
+nv50_disp_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, typeof(*state), commit_work);
+ nv50_disp_atomic_commit_tail(state);
+}
+
+static int
+nv50_disp_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool nonblock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ goto done;
+
+ INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ goto done;
+
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret)
+ goto err_cleanup;
+
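+ /* Enable completion notifiers for every window that will display a
+ * new image, so the commit tail can wait for them to be armed.
+ */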
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ if (asyw->set.image)
+ nv50_wndw_ntfy_enable(wndw, asyw);
+ }
+
+ drm_atomic_state_get(state);
+
+ /*
+ * Grab another RPM ref for the commit tail, which will release the
+ * ref when it's finished
+ */
+ pm_runtime_get_noresume(dev->dev);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ nv50_disp_atomic_commit_tail(state);
+
+err_cleanup:
+ if (ret)
+ drm_atomic_helper_unprepare_planes(dev, state);
+done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static struct nv50_outp_atom *
+nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
+{
+ struct nv50_outp_atom *outp;
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder == encoder)
+ return outp;
+ }
+
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ return ERR_PTR(-ENOMEM);
+
+ list_add(&outp->head, &atom->outp);
+ outp->encoder = encoder;
+ return outp;
+}
+
+static int
+nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
+ struct drm_connector_state *old_connector_state)
+{
+ struct drm_encoder *encoder = old_connector_state->best_encoder;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = old_connector_state->crtc))
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
+ if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ outp->flush_disable = true;
+ atom->flush_disable = true;
+ }
+ outp->clr.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
+ struct drm_connector_state *connector_state)
+{
+ struct drm_encoder *encoder = connector_state->best_encoder;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector_state->crtc))
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
+ if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ outp->set.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_core *core = nv50_disp(dev)->core;
+ struct drm_connector_state *old_connector_state, *new_connector_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_head *head;
+ struct nv50_head_atom *asyh;
+ int ret, i;
+
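+ /* While windows are still awaiting their initial head assignment,
+ * pull every CRTC into the state so the static window mapping can be
+ * applied to it.
+ */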
+ if (core->assign_windows && core->func->head->static_wndw_map) {
+ drm_for_each_crtc(crtc, dev) {
+ new_crtc_state = drm_atomic_get_crtc_state(state,
+ crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ head = nv50_head(crtc);
+ asyh = nv50_head_atom(new_crtc_state);
+ core->func->head->static_wndw_map(head, asyh);
+ }
+ }
+
+ /* We need to handle colour management on a per-plane basis. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->color_mgmt_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
+ if (ret)
+ return ret;
+
+ ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ return ret;
+
+ nv50_crc_atomic_check_outp(atom);
+
+ return 0;
+}
+
+static void
+nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+nv50_disp_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ drm_atomic_state_default_release(&atom->state);
+ kfree(atom);
+}
+
+static struct drm_atomic_state *
+nv50_disp_atomic_state_alloc(struct drm_device *dev)
+{
+ struct nv50_atom *atom;
+ if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
+ drm_atomic_state_init(dev, &atom->state) < 0) {
+ kfree(atom);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&atom->outp);
+ return &atom->state;
+}
+
+static const struct drm_mode_config_funcs
+nv50_disp_func = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = nv50_disp_atomic_check,
+ .atomic_commit = nv50_disp_atomic_commit,
+ .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+ .atomic_state_clear = nv50_disp_atomic_state_clear,
+ .atomic_state_free = nv50_disp_atomic_state_free,
+};
+
+static const struct drm_mode_config_helper_funcs
+nv50_disp_helper_func = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+
+static void
+nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
+ nv50_mstm_fini(nouveau_encoder(encoder));
+ }
+
+ if (!runtime)
+ cancel_work_sync(&drm->hpd_work);
+}
+
+static int
+nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
+{
+ struct nv50_core *core = nv50_disp(dev)->core;
+ struct drm_encoder *encoder;
+
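+ /* The core channel is initialised by nv50_display_create(); only
+ * reinitialise it when coming back from (runtime) suspend.
+ */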
+ if (resume || runtime)
+ core->func->init(core);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ struct nouveau_encoder *nv_encoder =
+ nouveau_encoder(encoder);
+ nv50_mstm_init(nv_encoder, runtime);
+ }
+ }
+
+ return 0;
+}
+
+static void
+nv50_display_destroy(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+
+ nv50_audio_component_fini(nouveau_drm(dev));
+
+ nvif_object_unmap(&disp->caps);
+ nvif_object_dtor(&disp->caps);
+ nv50_core_del(&disp->core);
+
+ nouveau_bo_unmap(disp->sync);
+ if (disp->sync)
+ nouveau_bo_unpin(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
+{
+ struct nvif_device *device = &nouveau_drm(dev)->client.device;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_table *dcb = &drm->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct nv50_disp *disp;
+ struct dcb_output *dcbe;
+ int crtcs, ret, i;
+ bool has_mst = nv50_has_mst(drm);
+
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+
+ mutex_init(&disp->mutex);
+
+ nouveau_display(dev)->priv = disp;
+ nouveau_display(dev)->dtor = nv50_display_destroy;
+ nouveau_display(dev)->init = nv50_display_init;
+ nouveau_display(dev)->fini = nv50_display_fini;
+ disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ dev->mode_config.helper_private = &nv50_disp_helper_func;
+ dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
+ dev->mode_config.normalize_zpos = true;
+
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
+ 0, 0x0000, NULL, NULL, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
+ if (!ret) {
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_unpin(disp->sync);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
+
+ if (ret)
+ goto out;
+
+ /* allocate master evo channel */
+ ret = nv50_core_new(drm, &disp->core);
+ if (ret)
+ goto out;
+
+ disp->core->func->init(disp->core);
+ if (disp->core->func->caps_init) {
+ ret = disp->core->func->caps_init(drm, disp);
+ if (ret)
+ goto out;
+ }
+
+ /* Assign the correct format modifiers */
+ if (disp->disp->object.oclass >= TU102_DISP)
+ nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+ else
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+ else
+ nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
+ /* FIXME: 256x256 cursors are supported on Kepler; however, unlike Maxwell and later
+ * generations, Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
+ * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
+ * small page allocations in prepare_fb(). When this is implemented, we should also force
+ * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
+ * But until then, just limit cursors to 128x128, which is small enough to avoid ever using
+ * large pages.
+ */
+ if (disp->disp->object.oclass >= GM107_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 128;
+ dev->mode_config.cursor_height = 128;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
+ /* create crtc objects to represent the hw heads */
+ if (disp->disp->object.oclass >= GV100_DISP)
+ crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
+ else
+ if (disp->disp->object.oclass >= GF110_DISP)
+ crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
+ else
+ crtcs = 0x3;
+
+ for (i = 0; i < fls(crtcs); i++) {
+ struct nv50_head *head;
+
+ if (!(crtcs & (1 << i)))
+ continue;
+
+ head = nv50_head_create(dev, i);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
+ goto out;
+ }
+
+ if (has_mst) {
+ head->msto = nv50_msto_new(dev, head, i);
+ if (IS_ERR(head->msto)) {
+ ret = PTR_ERR(head->msto);
+ head->msto = NULL;
+ goto out;
+ }
+
+ /*
+ * FIXME: This is a hack to work around the following
+ * issues:
+ *
+ * https://gitlab.gnome.org/GNOME/mutter/issues/759
+ * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
+ *
+ * Once these issues are closed, this should be
+ * removed
+ */
+ head->msto->encoder.possible_crtcs = crtcs;
+ }
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe);
+ if (IS_ERR(connector))
+ continue;
+
+ if (dcbe->location == DCB_LOC_ON_CHIP) {
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ ret = nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ ret = nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ } else {
+ ret = nv50_pior_create(connector, dcbe);
+ }
+
+ if (ret) {
+ NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+ dcbe->location, dcbe->type,
+ ffs(dcbe->or) - 1, ret);
+ ret = 0;
+ }
+ }
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->possible_encoders)
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ connector->name);
+ connector->funcs->destroy(connector);
+ }
+
+ /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
+ dev->vblank_disable_immediate = true;
+
+ nv50_audio_component_init(drm);
+
+out:
+ if (ret)
+ nv50_display_destroy(dev);
+ return ret;
+}
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp50xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp90xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
new file mode 100644
index 0000000000..9d66c9c726
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -0,0 +1,117 @@
+#ifndef __NV50_KMS_H__
+#define __NV50_KMS_H__
+#include <linux/workqueue.h>
+#include <nvif/mem.h>
+#include <nvif/push.h>
+
+#include "nouveau_display.h"
+
+struct nv50_msto;
+struct nouveau_encoder;
+
+struct nv50_disp {
+ struct nvif_disp *disp;
+ struct nv50_core *core;
+ struct nvif_object caps;
+
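+/* Offsets into the shared sync/notifier buffer: one 0x40-byte slot per
+ * channel, slot 0 for the core channel notifier and one slot per window
+ * (base/overlay) channel after it.
+ */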
+#define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o))
+#define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0 , 0x00)
+#define NV50_DISP_WNDW_SEM0(c) NV50_DISP_SYNC(1 + (c), 0x00)
+#define NV50_DISP_WNDW_SEM1(c) NV50_DISP_SYNC(1 + (c), 0x10)
+#define NV50_DISP_WNDW_NTFY(c) NV50_DISP_SYNC(1 + (c), 0x20)
+#define NV50_DISP_BASE_SEM0(c) NV50_DISP_WNDW_SEM0(0 + (c))
+#define NV50_DISP_BASE_SEM1(c) NV50_DISP_WNDW_SEM1(0 + (c))
+#define NV50_DISP_BASE_NTFY(c) NV50_DISP_WNDW_NTFY(0 + (c))
+#define NV50_DISP_OVLY_SEM0(c) NV50_DISP_WNDW_SEM0(4 + (c))
+#define NV50_DISP_OVLY_SEM1(c) NV50_DISP_WNDW_SEM1(4 + (c))
+#define NV50_DISP_OVLY_NTFY(c) NV50_DISP_WNDW_NTFY(4 + (c))
+ struct nouveau_bo *sync;
+
+ struct mutex mutex;
+};
+
+static inline struct nv50_disp *
+nv50_disp(struct drm_device *dev)
+{
+ return nouveau_display(dev)->priv;
+}
+
+struct nv50_disp_interlock {
+ enum nv50_disp_interlock_type {
+ NV50_DISP_INTERLOCK_CORE = 0,
+ NV50_DISP_INTERLOCK_CURS,
+ NV50_DISP_INTERLOCK_BASE,
+ NV50_DISP_INTERLOCK_OVLY,
+ NV50_DISP_INTERLOCK_WNDW,
+ NV50_DISP_INTERLOCK_WIMM,
+ NV50_DISP_INTERLOCK__SIZE
+ } type;
+ u32 data;
+ u32 wimm;
+};
+
+void corec37d_ntfy_init(struct nouveau_bo *, u32);
+
+void head907d_olut_load(struct drm_color_lut *, int size, void __iomem *);
+
+struct nv50_chan {
+ struct nvif_object user;
+ struct nvif_device *device;
+};
+
+struct nv50_dmac {
+ struct nv50_chan base;
+
+ struct nvif_push _push;
+ struct nvif_push *push;
+ u32 *ptr;
+
+ struct nvif_object sync;
+ struct nvif_object vram;
+
+ /* Protects against concurrent pushbuf access to this channel; the lock is
+ * grabbed by evo_wait (if the pushbuf reservation is successful) and
+ * dropped again by evo_kick. */
+ struct mutex lock;
+
+ u32 cur;
+ u32 put;
+ u32 max;
+};
+
+struct nv50_outp_atom {
+ struct list_head head;
+
+ struct drm_encoder *encoder;
+ bool flush_disable;
+
+ union nv50_outp_atom_mask {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } set, clr;
+};
+
+int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size,
+ s64 syncbuf, struct nv50_dmac *dmac);
+void nv50_dmac_destroy(struct nv50_dmac *);
+
+/*
+ * For normal encoders this just returns the encoder. For active MST encoders,
+ * this returns the real outp that's driving displays on the topology.
+ * Inactive MST encoders return NULL, since they would have no real outp to
+ * return anyway.
+ */
+struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
+
+bool nv50_has_mst(struct nouveau_drm *drm);
+
+u32 *evo_wait(struct nv50_dmac *, int nr);
+void evo_kick(u32 *, struct nv50_dmac *);
+
+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/handles.h b/drivers/gpu/drm/nouveau/dispnv50/handles.h
new file mode 100644
index 0000000000..a97a7bd292
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/handles.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV50_KMS_HANDLES_H__
+#define __NV50_KMS_HANDLES_H__
+
+/*
+ * Various hard-coded object handles that nouveau uses. These are made up by
+ * nouveau developers, not Nvidia. The only significance of the handles chosen
+ * is that they must all be unique.
+ */
+#define NV50_DISP_HANDLE_SYNCBUF 0xf0000000
+#define NV50_DISP_HANDLE_VRAM 0xf0000001
+
+#define NV50_DISP_HANDLE_WNDW_CTX(kind) (0xfb000000 | kind)
+#define NV50_DISP_HANDLE_CRC_CTX(head, i) (0xfc000000 | head->base.index << 1 | i)
+
+#endif /* !__NV50_KMS_HANDLES_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
new file mode 100644
index 0000000000..5f490fbf18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -0,0 +1,637 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "base.h"
+#include "core.h"
+#include "curs.h"
+#include "ovly.h"
+#include "crc.h"
+
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/cl0046.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include "nouveau_connector.h"
+
+void
+nv50_head_flush_clr(struct nv50_head *head,
+ struct nv50_head_atom *asyh, bool flush)
+{
+ union nv50_head_atom_mask clr = {
+ .mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
+ };
+ if (clr.crc) nv50_crc_atomic_clr(head);
+ if (clr.olut) head->func->olut_clr(head);
+ if (clr.core) head->func->core_clr(head);
+ if (clr.curs) head->func->curs_clr(head);
+}
+
+void
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ if (asyh->set.curs ) head->func->curs_set(head, asyh);
+ if (asyh->set.olut ) {
+ asyh->olut.offset = nv50_lut_load(&head->olut,
+ asyh->olut.buffer,
+ asyh->state.gamma_lut,
+ asyh->olut.load);
+ head->func->olut_set(head, asyh);
+ }
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ if (asyh->set.view ) head->func->view (head, asyh);
+ if (asyh->set.mode ) head->func->mode (head, asyh);
+ if (asyh->set.core ) head->func->core_set(head, asyh);
+ if (asyh->set.base ) head->func->base (head, asyh);
+ if (asyh->set.ovly ) head->func->ovly (head, asyh);
+ if (asyh->set.dither ) head->func->dither (head, asyh);
+ if (asyh->set.procamp) head->func->procamp (head, asyh);
+ if (asyh->set.crc ) nv50_crc_atomic_set (head, asyh);
+ if (asyh->set.or ) head->func->or (head, asyh);
+}
+
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ const int vib = asyc->procamp.color_vibrance - 100;
+ const int hue = asyc->procamp.vibrant_hue - 90;
+ const int adj = (vib > 0) ? 50 : 0;
+ asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+ asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+ asyh->set.procamp = true;
+}
+
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ u32 mode = 0x00;
+
+ if (asyc->dither.mode) {
+ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+ if (asyh->base.depth > asyh->or.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = asyc->dither.mode;
+ }
+
+ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+ if (asyh->or.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= asyc->dither.depth;
+ }
+ }
+
+ asyh->dither.enable = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, ENABLE);
+ asyh->dither.bits = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, BITS);
+ asyh->dither.mode = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, MODE);
+ asyh->set.dither = true;
+}
+
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ struct drm_connector *connector = asyc->state.connector;
+ struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+ struct drm_display_mode *umode = &asyh->state.mode;
+ int mode = asyc->scaler.mode;
+ struct edid *edid;
+ int umode_vdisplay, omode_hdisplay, omode_vdisplay;
+
+ if (connector->edid_blob_ptr)
+ edid = (struct edid *)connector->edid_blob_ptr->data;
+ else
+ edid = NULL;
+
+ if (!asyc->scaler.full) {
+ if (mode == DRM_MODE_SCALE_NONE)
+ omode = umode;
+ } else {
+ /* Non-EDID LVDS/eDP mode. */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
+ }
+
+ /* For the user-specified mode, we must ignore doublescan and
+ * the like, but honor frame packing.
+ */
+ umode_vdisplay = umode->vdisplay;
+ if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ umode_vdisplay += umode->vtotal;
+ asyh->view.iW = umode->hdisplay;
+ asyh->view.iH = umode_vdisplay;
+ /* For the output mode, we can just use the stock helper. */
+ drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
+ asyh->view.oW = omode_hdisplay;
+ asyh->view.oH = omode_vdisplay;
+
+ /* Add overscan compensation if necessary; this keeps the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+ (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+ drm_detect_hdmi_monitor(edid)))) {
+ u32 bX = asyc->scaler.underscan.hborder;
+ u32 bY = asyc->scaler.underscan.vborder;
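+ /* output height:width ratio in 19-bit fixed point */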
+ u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+ if (bX) {
+ asyh->view.oW -= (bX * 2);
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ } else {
+ asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ }
+
+ /* Handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation.
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /* NOTE: This will cause scaling when the input is
+ * larger than the output.
+ */
+ asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
+ asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Determine whether the scaling should be on width or on
+ * height. This is done by comparing the aspect ratios of the
+ * sizes. If the output AR is larger than input AR, that means
+ * we want to change the width (letterboxed on the
+ * left/right), otherwise on the height (letterboxed on the
+ * top/bottom).
+ *
+ * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
+ * screen will have letterboxes on the left/right. However a
+ * 16:9 (1.777) AR image on that same screen will have
+ * letterboxes on the top/bottom.
+ *
+ * inputAR = iW / iH; outputAR = oW / oH
+ * outputAR > inputAR is equivalent to oW * iH > iW * oH
+ */
+ if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
+ /* Recompute output width, i.e. left/right letterbox */
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
+ /* Recompute output height, i.e. top/bottom letterbox */
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ break;
+ default:
+ break;
+ }
+
+ asyh->set.view = true;
+}
+
+static int
+nv50_head_atomic_check_lut(struct nv50_head *head,
+ struct nv50_head_atom *asyh)
+{
+ struct drm_device *dev = head->base.base.dev;
+ struct drm_crtc *crtc = &head->base.base;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_property_blob *olut = asyh->state.gamma_lut,
+ *ilut = asyh->state.degamma_lut;
+ int size;
+
+ /* Ensure that the ilut is valid */
+ if (ilut) {
+ size = drm_color_lut_size(ilut);
+ if (!head->func->ilut_check(size)) {
+ NV_ATOMIC(drm, "Invalid size %d for degamma on [CRTC:%d:%s]\n",
+ size, crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+ }
+
+ /* Determine whether core output LUT should be enabled. */
+ if (olut) {
+ /* Check if any window(s) have stolen the core output LUT
+ * to use as an input LUT for legacy gamma + I8 colour format.
+ */
+ if (asyh->wndw.olut) {
+ /* If any window has stolen the core output LUT,
+ * all of them must.
+ */
+ if (asyh->wndw.olut != asyh->wndw.mask)
+ return -EINVAL;
+ olut = NULL;
+ }
+ }
+
+ if (!olut) {
+ if (!head->func->olut_identity) {
+ asyh->olut.handle = 0;
+ return 0;
+ }
+ size = 0;
+ } else {
+ size = drm_color_lut_size(olut);
+ }
+
+ if (!head->func->olut(head, asyh, size)) {
+ NV_ATOMIC(drm, "Invalid size %d for gamma on [CRTC:%d:%s]\n",
+ size, crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+ asyh->olut.handle = disp->core->chan.vram.handle;
+ asyh->olut.buffer = !asyh->olut.buffer;
+
+ return 0;
+}
+
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 blankus;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
+
+ /*
+ * DRM modes are defined in terms of a repeating interval
+ * starting with the active display area. The hardware modes
+ * are defined in terms of a repeating interval starting one
+ * unit (pixel or line) into the sync pulse. So, add bias.
+ */
+
+ m->h.active = mode->crtc_htotal;
+ m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
+ m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
+ m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
+
+ m->v.active = mode->crtc_vtotal;
+ m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
+ m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
+ m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
+
+ /*XXX: Safe underestimate, even "0" works */
+ blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
+ blankus *= 1000;
+ blankus /= mode->crtc_clock;
+ m->v.blankus = blankus;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ m->v.blank2e = m->v.active + m->v.blanke;
+ m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay;
+ m->v.active = (m->v.active * 2) + 1;
+ m->interlace = true;
+ } else {
+ m->v.blank2e = 0;
+ m->v.blank2s = 1;
+ m->interlace = false;
+ }
+ m->clock = mode->crtc_clock;
+
+ asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+ asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ asyh->set.or = head->func->or != NULL;
+ asyh->set.mode = true;
+}
+
+static int
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ struct nouveau_conn_atom *asyc = NULL;
+ struct drm_connector_state *conns;
+ struct drm_connector *conn;
+ int i, ret;
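+ /* The LUTs need re-validation if colour management changed, or if the
+ * set of windows borrowing the core output LUT has changed.
+ */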
+ bool check_lut = asyh->state.color_mgmt_changed ||
+ memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw));
+
+ NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+
+ if (check_lut) {
+ ret = nv50_head_atomic_check_lut(head, asyh);
+ if (ret)
+ return ret;
+ }
+
+ if (asyh->state.active) {
+ for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
+ if (conns->crtc == crtc) {
+ asyc = nouveau_conn_atom(conns);
+ break;
+ }
+ }
+
+ if (armh->state.active) {
+ if (asyc) {
+ if (asyh->state.mode_changed)
+ asyc->set.scaler = true;
+ if (armh->base.depth != asyh->base.depth)
+ asyc->set.dither = true;
+ }
+ } else {
+ if (asyc)
+ asyc->set.mask = ~0;
+ asyh->set.mask = ~0;
+ asyh->set.or = head->func->or != NULL;
+ }
+
+ if (asyh->state.mode_changed || asyh->state.connectors_changed)
+ nv50_head_atomic_check_mode(head, asyh);
+
+ if (check_lut)
+ asyh->olut.visible = asyh->olut.handle != 0;
+
+ if (asyc) {
+ if (asyc->set.scaler)
+ nv50_head_atomic_check_view(armh, asyh, asyc);
+ if (asyc->set.dither)
+ nv50_head_atomic_check_dither(armh, asyh, asyc);
+ if (asyc->set.procamp)
+ nv50_head_atomic_check_procamp(armh, asyh, asyc);
+ }
+
+ if (head->func->core_calc) {
+ head->func->core_calc(head, asyh);
+ if (!asyh->core.visible)
+ asyh->olut.visible = false;
+ }
+
+ asyh->set.base = armh->base.cpp != asyh->base.cpp;
+ asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+ } else {
+ asyh->olut.visible = false;
+ asyh->core.visible = false;
+ asyh->curs.visible = false;
+ asyh->base.cpp = 0;
+ asyh->ovly.cpp = 0;
+ }
+
+ if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+ if (asyh->core.visible) {
+ if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+ asyh->set.core = true;
+ } else
+ if (armh->core.visible) {
+ asyh->clr.core = true;
+ }
+
+ if (asyh->curs.visible) {
+ if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+ asyh->set.curs = true;
+ } else
+ if (armh->curs.visible) {
+ asyh->clr.curs = true;
+ }
+
+ if (asyh->olut.visible) {
+ if (memcmp(&armh->olut, &asyh->olut, sizeof(asyh->olut)))
+ asyh->set.olut = true;
+ } else
+ if (armh->olut.visible) {
+ asyh->clr.olut = true;
+ }
+ } else {
+ asyh->clr.olut = armh->olut.visible;
+ asyh->clr.core = armh->core.visible;
+ asyh->clr.curs = armh->curs.visible;
+ asyh->set.olut = asyh->olut.visible;
+ asyh->set.core = asyh->core.visible;
+ asyh->set.curs = asyh->curs.visible;
+ }
+
+ ret = nv50_crc_atomic_check_head(head, asyh, armh);
+ if (ret)
+ return ret;
+
+ if (asyh->clr.mask || asyh->set.mask)
+ nv50_atom(asyh->state.state)->lock_core = true;
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+ .atomic_check = nv50_head_atomic_check,
+ .get_scanout_position = nouveau_display_scanoutpos,
+};
+
+static void
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ __drm_atomic_helper_crtc_destroy_state(&asyh->state);
+ kfree(asyh);
+}
+
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh;
+ if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+ asyh->wndw = armh->wndw;
+ asyh->view = armh->view;
+ asyh->mode = armh->mode;
+ asyh->olut = armh->olut;
+ asyh->core = armh->core;
+ asyh->curs = armh->curs;
+ asyh->base = armh->base;
+ asyh->ovly = armh->ovly;
+ asyh->dither = armh->dither;
+ asyh->procamp = armh->procamp;
+ asyh->crc = armh->crc;
+ asyh->or = armh->or;
+ asyh->dp = armh->dp;
+ asyh->clr.mask = 0;
+ asyh->set.mask = 0;
+ return &asyh->state;
+}
+
+static void
+nv50_head_reset(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *asyh;
+
+ if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+ return;
+
+ if (crtc->state)
+ nv50_head_atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static int
+nv50_head_late_register(struct drm_crtc *crtc)
+{
+ return nv50_head_crc_late_register(nv50_head(crtc));
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
+{
+ struct nv50_head *head = nv50_head(crtc);
+
+ nvif_event_dtor(&head->base.vblank);
+ nvif_head_dtor(&head->base.head);
+ nv50_lut_fini(&head->olut);
+ drm_crtc_cleanup(crtc);
+ kfree(head);
+}
+
+static const struct drm_crtc_funcs
+nv50_head_func = {
+ .reset = nv50_head_reset,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .late_register = nv50_head_late_register,
+};
+
+static const struct drm_crtc_funcs
+nvd9_head_func = {
+ .reset = nv50_head_reset,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .verify_crc_source = nv50_crc_verify_source,
+ .get_crc_sources = nv50_crc_get_sources,
+ .set_crc_source = nv50_crc_set_source,
+ .late_register = nv50_head_late_register,
+};
+
+static int
+nv50_head_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
+{
+ struct nouveau_crtc *nv_crtc = container_of(event, struct nouveau_crtc, vblank);
+
+ if (drm_crtc_handle_vblank(&nv_crtc->base))
+ nv50_crc_handle_vblank(nv50_head(&nv_crtc->base));
+
+ return NVIF_EVENT_KEEP;
+}
+
+struct nv50_head *
+nv50_head_create(struct drm_device *dev, int index)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_head *head;
+ struct nv50_wndw *base, *ovly, *curs;
+ struct nouveau_crtc *nv_crtc;
+ struct drm_crtc *crtc;
+ const struct drm_crtc_funcs *funcs;
+ int ret;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return ERR_PTR(-ENOMEM);
+
+ head->func = disp->core->func->head;
+ head->base.index = index;
+
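+ /* The CRC capture hooks are only provided for GF110_DISP and newer
+ * display classes, hence the separate crtc func table.
+ */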
+ if (disp->disp->object.oclass < GF110_DISP)
+ funcs = &nv50_head_func;
+ else
+ funcs = &nvd9_head_func;
+
+ if (disp->disp->object.oclass < GV100_DISP) {
+ ret = nv50_base_new(drm, head->base.index, &base);
+ ret = nv50_ovly_new(drm, head->base.index, &ovly);
+ } else {
+ ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
+ head->base.index * 2 + 0, &base);
+ ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
+ head->base.index * 2 + 1, &ovly);
+ }
+ if (ret == 0)
+ ret = nv50_curs_new(drm, head->base.index, &curs);
+ if (ret) {
+ kfree(head);
+ return ERR_PTR(ret);
+ }
+
+ nv_crtc = &head->base;
+ crtc = &nv_crtc->base;
+ drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
+ funcs, "head-%d", head->base.index);
+ drm_crtc_helper_add(crtc, &nv50_head_help);
+ /* Keep the legacy gamma size at 256 to avoid compatibility issues */
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
+ disp->disp->object.oclass >= GF110_DISP,
+ head->func->olut_size);
+
+ if (head->func->olut_set) {
+ ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
+ if (ret) {
+ nv50_head_destroy(crtc);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = nvif_head_ctor(disp->disp, head->base.base.name, head->base.index, &head->base.head);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = nvif_head_vblank_event_ctor(&head->base.head, "kmsVbl", nv50_head_vblank_handler,
+ false, &nv_crtc->vblank);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return head;
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
new file mode 100644
index 0000000000..41c8788dfb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -0,0 +1,100 @@
+#ifndef __NV50_KMS_HEAD_H__
+#define __NV50_KMS_HEAD_H__
+#define nv50_head(c) container_of((c), struct nv50_head, base.base)
+#include <linux/workqueue.h>
+
+#include "disp.h"
+#include "atom.h"
+#include "crc.h"
+#include "lut.h"
+
+#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
+
+struct nv50_head {
+ const struct nv50_head_func *func;
+ struct nouveau_crtc base;
+ struct nv50_crc crc;
+ struct nv50_lut olut;
+ struct nv50_msto *msto;
+};
+
+struct nv50_head *nv50_head_create(struct drm_device *, int index);
+void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_clr(struct nv50_head *head,
+ struct nv50_head_atom *asyh, bool flush);
+
+struct nv50_head_func {
+ int (*view)(struct nv50_head *, struct nv50_head_atom *);
+ int (*mode)(struct nv50_head *, struct nv50_head_atom *);
+ bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
+ bool (*ilut_check)(int size);
+ bool olut_identity;
+ int olut_size;
+ int (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*olut_clr)(struct nv50_head *);
+ void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
+ int (*core_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*core_clr)(struct nv50_head *);
+ int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+ int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+ int (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*curs_clr)(struct nv50_head *);
+ int (*base)(struct nv50_head *, struct nv50_head_atom *);
+ int (*ovly)(struct nv50_head *, struct nv50_head_atom *);
+ int (*dither)(struct nv50_head *, struct nv50_head_atom *);
+ int (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+ int (*or)(struct nv50_head *, struct nv50_head_atom *);
+ void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
+};
+
+extern const struct nv50_head_func head507d;
+int head507d_view(struct nv50_head *, struct nv50_head_atom *);
+int head507d_mode(struct nv50_head *, struct nv50_head_atom *);
+bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
+void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
+int head507d_core_clr(struct nv50_head *);
+int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int head507d_base(struct nv50_head *, struct nv50_head_atom *);
+int head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head507d_dither(struct nv50_head *, struct nv50_head_atom *);
+int head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func head827d;
+
+extern const struct nv50_head_func head907d;
+int head907d_view(struct nv50_head *, struct nv50_head_atom *);
+int head907d_mode(struct nv50_head *, struct nv50_head_atom *);
+bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
+bool head907d_ilut_check(int size);
+int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_olut_clr(struct nv50_head *);
+int head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_core_clr(struct nv50_head *);
+int head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_curs_clr(struct nv50_head *);
+int head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
+int head907d_or(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func head917d;
+int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc37d;
+int headc37d_view(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_clr(struct nv50_head *);
+int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc57d;
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
new file mode 100644
index 0000000000..0edd4e520c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+int
+head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_PROCAMP(i),
+ NVDEF(NV507D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NV507D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+ NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NV507D, HEAD_SET_PROCAMP, TRANSITION, HARD));
+ return 0;
+}
+
+int
+head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
+}
+
+int
+head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 bounds = 0;
+ int ret;
+
+ if (asyh->ovly.cpp) {
+ switch (asyh->ovly.cpp) {
+ case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+ } else {
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
+}
+
+int
+head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 bounds = 0;
+ int ret;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
+}
+
+static int
+head507d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+ return 0;
+}
+
+static int
+head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+ return 0;
+}
+
+int
+head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ switch (asyw->image.format) {
+ case 0xcf: asyh->curs.format = NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ switch (asyw->image.w) {
+ case 32: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+ case 64: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+head507d_core_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTEXT_DMA_ISO(i), 0x00000000);
+ return 0;
+}
+
+static int
+head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_OFFSET(i, 0),
+ NVVAL(NV507D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_SIZE(i),
+ NVVAL(NV507D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV507D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV507D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV507D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVVAL(NV507D, HEAD_SET_PARAMS, KIND, asyh->core.kind) |
+ NVDEF(NV507D, HEAD_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256),
+
+ HEAD_SET_CONTEXT_DMA_ISO(i),
+ NVVAL(NV507D, HEAD_SET_CONTEXT_DMA_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+
+ /* EVO will complain with INVALID_STATE if we have an
+ * active cursor and (re)specify HeadSetContextDmaIso
+ * without also updating HeadSetOffsetCursor.
+ */
+ asyh->set.curs = asyh->curs.visible;
+ asyh->set.olut = asyh->olut.handle != 0;
+ return 0;
+}
+
+void
+head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_disp *disp = nv50_disp(head->base.base.dev);
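+ /* When a base framebuffer is attached, the core surface mirrors its
+ * geometry; otherwise it still has to span the full mode so that the
+ * overlay and/or cursor remain visible.
+ */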
+ if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+ asyh->core.x = asyh->base.x;
+ asyh->core.y = asyh->base.y;
+ asyh->core.w = asyh->base.w;
+ asyh->core.h = asyh->base.h;
+ } else
+ if ((asyh->core.visible = (asyh->ovly.cpp != 0)) ||
+ (asyh->core.visible = asyh->curs.visible)) {
+ /*XXX: We need to either find some way of having the
+ * primary base layer appear black, while still
+ * being able to display the other layers, or we
+ * need to allocate a dummy black surface here.
+ */
+ asyh->core.x = 0;
+ asyh->core.y = 0;
+ asyh->core.w = asyh->state.mode.hdisplay;
+ asyh->core.h = asyh->state.mode.vdisplay;
+ }
+ asyh->core.handle = disp->core->chan.vram.handle;
+ asyh->core.offset = 0;
+ asyh->core.format = NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8;
+ asyh->core.kind = NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH;
+ asyh->core.layout = NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+ asyh->core.blockh = NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
+ asyh->core.blocks = 0;
+ asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+}
+
+static int
+head507d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+ return 0;
+}
+
+static int
+head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+ HEAD_SET_BASE_LUT_HI(i),
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+ return 0;
+}
+
+static void
+head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
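+ /* Each LUT entry is 8 bytes: R, G and B written as 16-bit words with
+ * 11 significant bits shifted into position; the final word is unused.
+ */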
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 11) << 3, mem + 0);
+ writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+bool
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
+{
+ if (size != 256)
+ return false;
+
+ if (asyh->base.cpp == 1)
+ asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES;
+ else
+ asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES;
+
+ asyh->olut.load = head507d_olut_load;
+ return true;
+}
+
+int
+head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_PIXEL_CLOCK(i),
+ NVVAL(NV507D, HEAD_SET_PIXEL_CLOCK, FREQUENCY, m->clock) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, MODE, CLK_CUSTOM) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, ADJ1000DIV1001, FALSE) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, NOT_DRIVER, FALSE),
+
+ HEAD_SET_CONTROL(i),
+ NVVAL(NV507D, HEAD_SET_CONTROL, STRUCTURE, m->interlace));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_OVERSCAN_COLOR(i),
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+ HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NV507D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+ HEAD_SET_RASTER_VERT_BLANK2(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e),
+
+ HEAD_SET_RASTER_VERT_BLANK_DMI(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK_DMI, DURATION, m->v.blankus));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
+ return 0;
+}
+
+int
+head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 7)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+const struct nv50_head_func
+head507d = {
+ .view = head507d_view,
+ .mode = head507d_mode,
+ .olut = head507d_olut,
+ .olut_size = 256,
+ .olut_set = head507d_olut_set,
+ .olut_clr = head507d_olut_clr,
+ .core_calc = head507d_core_calc,
+ .core_set = head507d_core_set,
+ .core_clr = head507d_core_clr,
+ .curs_layout = head507d_curs_layout,
+ .curs_format = head507d_curs_format,
+ .curs_set = head507d_curs_set,
+ .curs_clr = head507d_curs_clr,
+ .base = head507d_base,
+ .ovly = head507d_ovly,
+ .dither = head507d_dither,
+ .procamp = head507d_procamp,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
new file mode 100644
index 0000000000..194d1771c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827d.h>
+
+static int
+head827d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+ return 0;
+}
+
+static int
+head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
+static int
+head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_OFFSET(i, 0),
+ NVVAL(NV827D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_SIZE(i),
+ NVVAL(NV827D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV827D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV827D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV827D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVDEF(NV827D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV827D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+ HEAD_SET_CONTEXT_DMAS_ISO(i, 0),
+ NVVAL(NV827D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+ NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+ return 0;
+}
+
+static int
+head827d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+ return 0;
+}
+
+static int
+head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+ HEAD_SET_BASE_LUT_HI(i),
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+ return 0;
+}
+
+const struct nv50_head_func
+head827d = {
+ .view = head507d_view,
+ .mode = head507d_mode,
+ .olut = head507d_olut,
+ .olut_size = 256,
+ .olut_set = head827d_olut_set,
+ .olut_clr = head827d_olut_clr,
+ .core_calc = head507d_core_calc,
+ .core_set = head827d_core_set,
+ .core_clr = head507d_core_clr,
+ .curs_layout = head507d_curs_layout,
+ .curs_format = head507d_curs_format,
+ .curs_set = head827d_curs_set,
+ .curs_clr = head827d_curs_clr,
+ .base = head507d_base,
+ .ovly = head507d_ovly,
+ .dither = head507d_dither,
+ .procamp = head507d_procamp,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
new file mode 100644
index 0000000000..18fe4c1e2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <drm/drm_connector.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_vblank.h>
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_connector.h"
+#include "head.h"
+#include "core.h"
+#include "crc.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+int
+head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, asyh->or.depth),
+
+ HEAD_SET_CONTROL(i), 0x31ec6000 | head->base.index << 25 |
+ NVVAL(NV907D, HEAD_SET_CONTROL, STRUCTURE, asyh->mode.interlace));
+ return 0;
+}
+
+int
+head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_PROCAMP(i),
+ NVDEF(NV907D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+ NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE));
+ return 0;
+}
+
+static int
+head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
+}
+
+int
+head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 bounds = 0;
+ int ret;
+
+ if (asyh->ovly.cpp) {
+ switch (asyh->ovly.cpp) {
+ case 8: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, USABLE, TRUE);
+ } else {
+ bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS(i), bounds);
+ return 0;
+}
+
+static int
+head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 bounds = 0;
+ int ret;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
+}
+
+int
+head907d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+ return 0;
+}
+
+int
+head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
+int
+head907d_core_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMAS_ISO(i), 0x00000000);
+ return 0;
+}
+
+int
+head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OFFSET(i),
+ NVVAL(NV907D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_SIZE(i),
+ NVVAL(NV907D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV907D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV907D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV907D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVDEF(NV907D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV907D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+ HEAD_SET_CONTEXT_DMAS_ISO(i),
+ NVVAL(NV907D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_POINT_IN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+ return 0;
+}
+
+int
+head907d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+ return 0;
+}
+
+int
+head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_LO, MODE, asyh->olut.mode) |
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, NEVER_YIELD_TO_BASE, DISABLE),
+
+ HEAD_SET_OUTPUT_LUT_HI(i),
+ NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+ return 0;
+}
+
+void
+head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
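+ /* Each LUT entry is 8 bytes; components are 14-bit values offset by 0x6000. */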
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 14) + 0x6000, mem + 0);
+ writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+bool
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
+{
+ if (size != 256 && size != 1024)
+ return false;
+
+ if (size == 1024)
+ asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+ else
+ asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
+ asyh->olut.load = head907d_olut_load;
+ return true;
+}
+
+bool head907d_ilut_check(int size)
+{
+ return size == 256 || size == 1024;
+}
+
+int
+head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+ HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NV907D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+ HEAD_SET_RASTER_VERT_BLANK2(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+ NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, ADJ1000DIV1001, FALSE),
+
+ HEAD_SET_PIXEL_CLOCK_CONFIGURATION(i),
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, MODE, CLK_CUSTOM) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, NOT_DRIVER, FALSE) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, ENABLE_HOPPING, FALSE),
+
+ HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, ADJ1000DIV1001, FALSE));
+ return 0;
+}
+
+int
+head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 8)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MAX(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+const struct nv50_head_func
+head907d = {
+ .view = head907d_view,
+ .mode = head907d_mode,
+ .olut = head907d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_size = 1024,
+ .olut_set = head907d_olut_set,
+ .olut_clr = head907d_olut_clr,
+ .core_calc = head507d_core_calc,
+ .core_set = head907d_core_set,
+ .core_clr = head907d_core_clr,
+ .curs_layout = head507d_curs_layout,
+ .curs_format = head507d_curs_format,
+ .curs_set = head907d_curs_set,
+ .curs_clr = head907d_curs_clr,
+ .base = head907d_base,
+ .ovly = head907d_ovly,
+ .dither = head907d_dither,
+ .procamp = head907d_procamp,
+ .or = head907d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
new file mode 100644
index 0000000000..4ce47b55f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+#include "nvif/push.h"
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl917d.h>
+
+static int
+head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
+}
+
+static int
+head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 bounds = 0;
+ int ret;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+ bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, BASE_LUT, USAGE_1025);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
+}
+
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
+int
+head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ switch (asyw->state.fb->width) {
+ case 32: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+ case 64: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
+ case 128: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128; break;
+ case 256: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256; break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+const struct nv50_head_func
+head917d = {
+ .view = head907d_view,
+ .mode = head907d_mode,
+ .olut = head907d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_size = 1024,
+ .olut_set = head907d_olut_set,
+ .olut_clr = head907d_olut_clr,
+ .core_calc = head507d_core_calc,
+ .core_set = head907d_core_set,
+ .core_clr = head907d_core_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = head507d_curs_format,
+ .curs_set = head917d_curs_set,
+ .curs_clr = head907d_curs_clr,
+ .base = head917d_base,
+ .ovly = head907d_ovly,
+ .dither = head917d_dither,
+ .procamp = head907d_procamp,
+ .or = head907d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
new file mode 100644
index 0000000000..a4a3b78ea4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
+headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved later for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: depth = 5; break;
+ case 5: depth = 4; break;
+ case 2: depth = 1; break;
+ case 0: depth = 4; break;
+ default:
+ depth = asyh->or.depth;
+ WARN_ON(1);
+ break;
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE));
+ return 0;
+}
+
+static int
+headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, BLACK_LEVEL, GRAPHICS));
+ return 0;
+}
+
+int
+headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVC37D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
+}
+
+int
+headc37d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), 0x00000000);
+ return 0;
+}
+
+int
+headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 7)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, DE_GAMMA, NONE),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), asyh->curs.handle);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_OFFSET_CURSOR(i, 0), asyh->curs.offset >> 8);
+ return 0;
+}
+
+int
+headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.format = asyw->image.format;
+ return 0;
+}
+
+static int
+headc37d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), 0x00000000);
+ return 0;
+}
+
+static int
+headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, SIZE, asyh->olut.size) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, RANGE, asyh->olut.range) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, OUTPUT_MODE, asyh->olut.output_mode),
+
+ HEAD_SET_OFFSET_OUTPUT_LUT(i), asyh->olut.offset >> 8,
+ HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), asyh->olut.handle);
+ return 0;
+}
+
+static bool
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
+{
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyh->olut.size = size == 1024 ? NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 :
+ NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257;
+ asyh->olut.range = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY;
+ asyh->olut.output_mode = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE;
+ asyh->olut.load = head907d_olut_load;
+ return true;
+}
+
+static int
+headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 15)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ //XXX:
+ PUSH_NVSQ(push, NVC37D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+ PUSH_NVSQ(push, NVC37D, 0x2008 + (i * 0x400), m->interlace);
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ PUSH_MTHD(push, NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_LUT, USAGE_1025) |
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+ return 0;
+}
+
+int
+headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+void
+headc37d_static_wndw_map(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ int i, end;
+
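+ /* Window channels are assigned statically in pairs: head N owns windows 2N and 2N+1. */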
+ for (i = head->base.index * 2, end = i + 2; i < end; i++)
+ asyh->wndw.owned |= BIT(i);
+}
+
+const struct nv50_head_func
+headc37d = {
+ .view = headc37d_view,
+ .mode = headc37d_mode,
+ .olut = headc37d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_size = 1024,
+ .olut_set = headc37d_olut_set,
+ .olut_clr = headc37d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headc37d_curs_set,
+ .curs_clr = headc37d_curs_clr,
+ .dither = headc37d_dither,
+ .procamp = headc37d_procamp,
+ .or = headc37d_or,
+ .static_wndw_map = headc37d_static_wndw_map,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
new file mode 100644
index 0000000000..543f08ceaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
+headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved later for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: depth = 5; break;
+ case 5: depth = 4; break;
+ case 2: depth = 1; break;
+ case 0: depth = 4; break;
+ default:
+ depth = asyh->or.depth;
+ WARN_ON(1);
+ break;
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+ return 0;
+}
+
+static int
+headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ //TODO:
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+ return 0;
+}
+
+static int
+headc57d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_OLUT(i), 0x00000000);
+ return 0;
+}
+
+static int
+headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVC57D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff,
+ HEAD_SET_CONTEXT_DMA_OLUT(i), asyh->olut.handle,
+ HEAD_SET_OFFSET_OLUT(i), asyh->olut.offset >> 8);
+ return 0;
+}
+
+static void
+headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
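+ /* Expand the 256-entry input LUT by writing four linearly interpolated
+ * steps per entry, giving the full 1024-entry table.
+ */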
+ while (size--) {
+ u16 r = drm_color_lut_extract(in-> red + 0, 16);
+ u16 g = drm_color_lut_extract(in->green + 0, 16);
+ u16 b = drm_color_lut_extract(in-> blue + 0, 16);
+ u16 ri = 0, gi = 0, bi = 0, i;
+
+ if (in++, size) {
+ ri = (drm_color_lut_extract(in-> red, 16) - r) / 4;
+ gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
+ bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
+ }
+
+ for (i = 0; i < 4; i++, mem += 8) {
+ writew(r + ri * i, mem + 0);
+ writew(g + gi * i, mem + 2);
+ writew(b + bi * i, mem + 4);
+ }
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static void
+headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ writew(drm_color_lut_extract(in-> red, 16), mem + 0);
+ writew(drm_color_lut_extract(in->green, 16), mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static bool
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
+{
+ if (size != 0 && size != 256 && size != 1024)
+ return false;
+
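+ /* Always program a DIRECT10 LUT with interpolation enabled; 256-entry
+ * input tables are expanded to 1024 entries at load time.
+ */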
+ asyh->olut.mode = NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10;
+ asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
+ asyh->olut.output_mode = NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE;
+ if (size == 256)
+ asyh->olut.load = headc57d_olut_load_8;
+ else
+ asyh->olut.load = headc57d_olut_load;
+ return true;
+}
+
+static int
+headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 15)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ //XXX:
+ PUSH_NVSQ(push, NVC57D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+ PUSH_NVSQ(push, NVC57D, 0x2008 + (i * 0x400), m->interlace);
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ PUSH_MTHD(push, NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+ return 0;
+}
+
+const struct nv50_head_func
+headc57d = {
+ .view = headc37d_view,
+ .mode = headc57d_mode,
+ .olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_identity = true,
+ .olut_size = 1024,
+ .olut_set = headc57d_olut_set,
+ .olut_clr = headc57d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headc37d_curs_set,
+ .curs_clr = headc37d_curs_clr,
+ .dither = headc37d_dither,
+ .procamp = headc57d_procamp,
+ .or = headc57d_or,
+ /* TODO: flexible window mappings */
+ .static_wndw_map = headc37d_static_wndw_map,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
new file mode 100644
index 0000000000..6b2ad1e6ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "lut.h"
+#include "disp.h"
+
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+
+#include <nvif/class.h>
+
+u32
+nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
+ void (*load)(struct drm_color_lut *, int, void __iomem *))
+{
+ struct drm_color_lut *in = blob ? blob->data : NULL;
+ void __iomem *mem = lut->mem[buffer].object.map.ptr;
+ const u32 addr = lut->mem[buffer].addr;
+ int i;
+
+ if (!in) {
+ in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
+ if (!WARN_ON(!in)) {
+ for (i = 0; i < 1024; i++) {
+ in[i].red =
+ in[i].green =
+ in[i].blue = (i << 16) >> 10;
+ }
+ load(in, 1024, mem);
+ kvfree(in);
+ }
+ } else {
+ load(in, drm_color_lut_size(blob), mem);
+ }
+
+ return addr;
+}
+
+void
+nv50_lut_fini(struct nv50_lut *lut)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
+ nvif_mem_dtor(&lut->mem[i]);
+}
+
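+/* Allocate and map the pair of VRAM buffers used to double-buffer LUT
+ * updates.  Display classes prior to GF110 use 257-entry LUTs, later ones
+ * 1025 entries, at 8 bytes per entry.
+ */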
+int
+nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
+ struct nv50_lut *lut)
+{
+ const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
+ int ret = nvif_mem_ctor_map(mmu, "kmsLut", NVIF_MEM_VRAM,
+ size * 8, &lut->mem[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
new file mode 100644
index 0000000000..b3b9040cfe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -0,0 +1,16 @@
+#ifndef __NV50_KMS_LUT_H__
+#define __NV50_KMS_LUT_H__
+#include <nvif/mem.h>
+struct drm_property_blob;
+struct drm_color_lut;
+struct nv50_disp;
+
+struct nv50_lut {
+ struct nvif_mem mem[2];
+};
+
+int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
+void nv50_lut_fini(struct nv50_lut *);
+u32 nv50_lut_load(struct nv50_lut *, int buffer, struct drm_property_blob *,
+ void (*)(struct drm_color_lut *, int size, void __iomem *));
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.c b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
new file mode 100644
index 0000000000..2a2841d344
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "oimm.h"
+
+#include <nvif/class.h>
+
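+/* Select the newest overlay immediate class supported by the device and
+ * bind it to the given overlay window.
+ */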
+int
+nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
+{
+ static const struct {
+ s32 oclass;
+ int version;
+ int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
+ } oimms[] = {
+ { GK104_DISP_OVERLAY, 0, oimm507b_init },
+ { GF110_DISP_OVERLAY, 0, oimm507b_init },
+ { GT214_DISP_OVERLAY, 0, oimm507b_init },
+ { G82_DISP_OVERLAY, 0, oimm507b_init },
+ { NV50_DISP_OVERLAY, 0, oimm507b_init },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid;
+
+ cid = nvif_mclass(&disp->disp->object, oimms);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported overlay immediate class\n");
+ return cid;
+ }
+
+ return oimms[cid].init(drm, oimms[cid].oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.h b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
new file mode 100644
index 0000000000..6fa51f101e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_OIMM_H__
+#define __NV50_KMS_OIMM_H__
+#include "wndw.h"
+
+int oimm507b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
+
+int nv50_oimm_init(struct nouveau_drm *, struct nv50_wndw *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
new file mode 100644
index 0000000000..752318cf3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "oimm.h"
+
+#include <nvif/if0014.h>
+
+static int
+oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+ s32 oclass, struct nv50_wndw *wndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = wndw->id,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int ret;
+
+ ret = nvif_object_ctor(&disp->disp->object, "kmsOvim", 0, oclass,
+ &args, sizeof(args), &wndw->wimm.base.user);
+ if (ret) {
+ NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+ wndw->immd = func;
+ return 0;
+}
+
+int
+oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
+{
+ return oimm507b_init_(&curs507a, drm, oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
new file mode 100644
index 0000000000..90c246d476
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+#include "oimm.h"
+
+#include <nvif/class.h>
+
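+/* Create an overlay window for the given head: pick the newest supported
+ * overlay class, construct the plane and channel, then attach the matching
+ * overlay immediate channel.
+ */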
+int
+nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+ static const struct {
+ s32 oclass;
+ int version;
+ int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+ } ovlys[] = {
+ { GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly917e_new },
+ { GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
+ { GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
+ { GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
+ { G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
+ { NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid, ret;
+
+ cid = nvif_mclass(&disp->disp->object, ovlys);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported overlay class\n");
+ return cid;
+ }
+
+ ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
+ if (ret)
+ return ret;
+
+ return nv50_oimm_init(drm, *pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
new file mode 100644
index 0000000000..6ae1fbe12c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_KMS_OVLY_H__
+#define __NV50_KMS_OVLY_H__
+#include "wndw.h"
+
+int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly507e_new_(const struct nv50_wndw_func *, const u32 *format,
+ struct nouveau_drm *, int head, s32 oclass,
+ u32 interlock_data, struct nv50_wndw **);
+int ovly507e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void ovly507e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+extern const u32 ovly827e_format[];
+void ovly827e_ntfy_reset(struct nouveau_bo *, u32);
+int ovly827e_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+
+extern const struct nv50_wndw_func ovly907e;
+
+int ovly827e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly907e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly917e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
new file mode 100644
index 0000000000..797c1e4e0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include <nvif/if0014.h>
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507e.h>
+
+int
+ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV507E, SET_POINT_IN,
+ NVVAL(NV507E, SET_POINT_IN, X, asyw->scale.sx) |
+ NVVAL(NV507E, SET_POINT_IN, Y, asyw->scale.sy),
+
+ SET_SIZE_IN,
+ NVVAL(NV507E, SET_SIZE_IN, WIDTH, asyw->scale.sw) |
+ NVVAL(NV507E, SET_SIZE_IN, HEIGHT, asyw->scale.sh),
+
+ SET_SIZE_OUT,
+ NVVAL(NV507E, SET_SIZE_OUT, WIDTH, asyw->scale.dw));
+ return 0;
+}
+
+static int
+ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
+
+ PUSH_MTHD(push, NV507E, SET_PRESENT_CONTROL,
+ NVDEF(NV507E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV507E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV507E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV507E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV507E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+ PUSH_MTHD(push, NV507E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV507E, SURFACE_SET_SIZE,
+ NVVAL(NV507E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV507E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV507E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV507E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV507E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+ NVVAL(NV507E, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+ NVDEF(NV507E, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+ return 0;
+}
+
+void
+ovly507e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->ovly.cpp = 0;
+}
+
+int
+ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ const struct drm_framebuffer *fb = asyw->state.fb;
+ int ret;
+
+ ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ asyh->ovly.cpp = fb->format->cpp[0];
+ return 0;
+}
+
+#include "nouveau_bo.h"
+
+static const struct nv50_wndw_func
+ovly507e = {
+ .acquire = ovly507e_acquire,
+ .release = ovly507e_release,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_reset = base507c_ntfy_reset,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .image_set = ovly507e_image_set,
+ .image_clr = base507c_image_clr,
+ .scale_set = ovly507e_scale_set,
+ .update = base507c_update,
+};
+
+static const u32
+ovly507e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ 0
+};
+
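+/* Common overlay window constructor: registers the DRM overlay plane,
+ * creates the overlay channel DMA, and records the notifier/semaphore
+ * offsets used for synchronisation.
+ */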
+int
+ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
+ struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
+ struct nv50_wndw **pwndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = head,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_wndw *wndw;
+ int ret;
+
+ ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
+ "ovly", head, format, BIT(head),
+ NV50_DISP_INTERLOCK_OVLY, interlock_data,
+ &wndw);
+ if (*pwndw = wndw, ret)
+ return ret;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ &oclass, 0, &args, sizeof(args),
+ disp->sync->offset, &wndw->wndw);
+ if (ret) {
+ NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ wndw->ntfy = NV50_DISP_OVLY_NTFY(wndw->id);
+ wndw->sema = NV50_DISP_OVLY_SEM0(wndw->id);
+ wndw->data = 0x00000000;
+ return 0;
+}
+
+int
+ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass,
+ 0x00000004 << (head * 8), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
new file mode 100644
index 0000000000..02dc02d926
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+#include "atom.h"
+
+#include <nouveau_bo.h>
+
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl827e.h>
+
+static int
+ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
+
+ PUSH_MTHD(push, NV827E, SET_PRESENT_CONTROL,
+ NVDEF(NV827E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV827E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV827E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV827E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV827E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+ PUSH_MTHD(push, NV827E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV827E, SURFACE_SET_SIZE,
+ NVVAL(NV827E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV827E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV827E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV827E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV827E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+ return 0;
+}
+
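+/* Poll the flip notifier for up to two seconds, sleeping briefly between
+ * reads, until the hardware reports that the update has begun.
+ */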
+int
+ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
+ struct nvif_device *device)
+{
+ s64 time = nvif_msec(device, 2000ULL,
+ if (NVBO_TD32(bo, offset, NV_DISP_NOTIFICATION_1, _3, STATUS, ==, BEGUN))
+ break;
+ usleep_range(1, 2);
+ );
+ return time < 0 ? time : 0;
+}
+
+void
+ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
+{
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_0, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_1, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _2, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _3,
+ NVDEF(NV_DISP_NOTIFICATION_1, _3, STATUS, NOT_BEGUN));
+}
+
+static const struct nv50_wndw_func
+ovly827e = {
+ .acquire = ovly507e_acquire,
+ .release = ovly507e_release,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_reset = ovly827e_ntfy_reset,
+ .ntfy_wait_begun = ovly827e_ntfy_wait_begun,
+ .image_set = ovly827e_image_set,
+ .image_clr = base507c_image_clr,
+ .scale_set = ovly507e_scale_set,
+ .update = base507c_update,
+};
+
+const u32
+ovly827e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR2101010,
+ 0
+};
+
+int
+ovly827e_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return ovly507e_new_(&ovly827e, ovly827e_format, drm, head, oclass,
+ 0x00000004 << (head * 8), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
new file mode 100644
index 0000000000..645130d18a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+#include "atom.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907e.h>
+
+static int
+ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
+
+ PUSH_MTHD(push, NV907E, SET_PRESENT_CONTROL,
+ NVDEF(NV907E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV907E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV907E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV907E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV907E, SET_COMPOSITION_CONTROL, MODE, OPAQUE));
+
+ PUSH_MTHD(push, NV907E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV907E, SURFACE_SET_SIZE,
+ NVVAL(NV907E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV907E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV907E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV907E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV907E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+ return 0;
+}
+
+const struct nv50_wndw_func
+ovly907e = {
+ .acquire = ovly507e_acquire,
+ .release = ovly507e_release,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
+ .ntfy_reset = ovly827e_ntfy_reset,
+ .ntfy_wait_begun = ovly827e_ntfy_wait_begun,
+ .image_set = ovly907e_image_set,
+ .image_clr = base507c_image_clr,
+ .scale_set = ovly507e_scale_set,
+ .update = base507c_update,
+};
+
+static const u32
+ovly907e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+ 0
+};
+
+int
+ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return ovly507e_new_(&ovly907e, ovly907e_format, drm, head, oclass,
+ 0x00000004 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
new file mode 100644
index 0000000000..e24d6fd234
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+
+static const u32
+ovly917e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ 0
+};
+
+int
+ovly917e_new(struct nouveau_drm *drm, int head, s32 oclass,
+ struct nv50_wndw **pwndw)
+{
+ return ovly507e_new_(&ovly907e, ovly917e_format, drm, head, oclass,
+ 0x00000004 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
new file mode 100644
index 0000000000..17d230256b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
+pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if (asyh) {
+ ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+ ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+ ctrl |= NVVAL(NV837D, PIOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, PIOR_SET_CONTROL(or), ctrl);
+ return 0;
+}
+
+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+ int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
+const struct nv50_outp_func
+pior507d = {
+ .ctrl = pior507d_ctrl,
+ .get_caps = pior507d_get_caps,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
new file mode 100644
index 0000000000..ca73d77108
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
+sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if (asyh) {
+ ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+ ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+ ctrl |= NVVAL(NV837D, SOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
+ }
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
+}
+
+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
+const struct nv50_outp_func
+sor507d = {
+ .ctrl = sor507d_ctrl,
+ .get_caps = sor507d_get_caps,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
new file mode 100644
index 0000000000..c86cd8fa61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+#include <nouveau_bo.h>
+
+static int
+sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
+}
+
+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ struct nouveau_bo *bo = disp->sync;
+ const int off = or * 2;
+ outp->caps.dp_interlace =
+ NVBO_RV32(bo, off, NV907D_CORE_NOTIFIER_3, CAPABILITIES_CAP_SOR0_20, DP_INTERLACE);
+}
+
+const struct nv50_outp_func
+sor907d = {
+ .ctrl = sor907d_ctrl,
+ .get_caps = sor907d_get_caps,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
new file mode 100644
index 0000000000..9eaef34816
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
+sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+ struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
+}
+
+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
+const struct nv50_outp_func
+sorc37d = {
+ .ctrl = sorc37d_ctrl,
+ .get_caps = sorc37d_get_caps,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
new file mode 100644
index 0000000000..566fbddfc8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wimm.h"
+
+#include <nvif/class.h>
+
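+/* Select the newest window immediate class supported by the device and
+ * initialise it for the given window.
+ */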
+int
+nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
+{
+ struct {
+ s32 oclass;
+ int version;
+ int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
+ } wimms[] = {
+ { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
+ { TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
+ { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid;
+
+ cid = nvif_mclass(&disp->disp->object, wimms);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported window immediate class\n");
+ return cid;
+ }
+
+ return wimms[cid].init(drm, wimms[cid].oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.h b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
new file mode 100644
index 0000000000..363052309b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_WIMM_H__
+#define __NV50_KMS_WIMM_H__
+#include "wndw.h"
+
+int nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *);
+
+int wimmc37b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
new file mode 100644
index 0000000000..ee76b091d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wimm.h"
+#include "atom.h"
+#include "wndw.h"
+
+#include <nvif/if0014.h>
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37b.h>
+
+static int
+wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+ struct nvif_push *push = wndw->wimm.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37B, UPDATE, 0x00000001 |
+ NVVAL(NVC37B, UPDATE, INTERLOCK_WITH_WINDOW,
+ !!(interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)));
+ return PUSH_KICK(push);
+}
+
+static int
+wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wimm.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37B, SET_POINT_OUT(0),
+ NVVAL(NVC37B, SET_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NVC37B, SET_POINT_OUT, Y, asyw->point.y));
+ return 0;
+}
+
+static const struct nv50_wimm_func
+wimmc37b = {
+ .point = wimmc37b_point,
+ .update = wimmc37b_update,
+};
+
+static int
+wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+ s32 oclass, struct nv50_wndw *wndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = wndw->id,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int ret;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ &oclass, 0, &args, sizeof(args), -1,
+ &wndw->wimm);
+ if (ret) {
+ NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ wndw->interlock.wimm = wndw->interlock.data;
+ wndw->immd = func;
+ return 0;
+}
+
+int
+wimmc37b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
+{
+ return wimmc37b_init_(&wimmc37b, drm, oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
new file mode 100644
index 0000000000..7a2cceaee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "wimm.h"
+#include "handles.h"
+
+#include <nvif/class.h>
+#include <nvif/cl0002.h>
+
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507e.h>
+#include <nvhw/class/clc37e.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_bo.h"
+#include "nouveau_gem.h"
+
+static void
+nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
+{
+ nvif_object_dtor(&ctxdma->object);
+ list_del(&ctxdma->head);
+ kfree(ctxdma);
+}
+
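+/* Look up, or create and cache, a DMA context object describing VRAM with
+ * the memory kind required by the framebuffer.  One object is kept per
+ * kind on the window's ctxdma list and reused across framebuffers.
+ */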
+static struct nv50_wndw_ctxdma *
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
+ struct nv50_wndw_ctxdma *ctxdma;
+ u32 handle;
+ u32 unused;
+ u8 kind;
+ struct {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf119_dma_v0 gf119;
+ };
+ } args = {};
+ u32 argc = sizeof(args.base);
+ int ret;
+
+ nouveau_framebuffer_get_layout(fb, &unused, &kind);
+ handle = NV50_DISP_HANDLE_WNDW_CTX(kind);
+
+ list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
+ if (ctxdma->object.handle == handle)
+ return ctxdma;
+ }
+
+ if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ list_add(&ctxdma->head, &wndw->ctxdma.list);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = 0;
+ args.base.limit = drm->client.device.info.ram_user - 1;
+
+ if (drm->client.device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->client.device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->client.device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ argc += sizeof(args.gf100);
+ } else {
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ argc += sizeof(args.gf119);
+ }
+
+ ret = nvif_object_ctor(wndw->ctxdma.parent, "kmsFbCtxDma", handle,
+ NV_DMA_IN_MEMORY, &args, argc, &ctxdma->object);
+ if (ret) {
+ nv50_wndw_ctxdma_del(ctxdma);
+ return ERR_PTR(ret);
+ }
+
+ return ctxdma;
+}
+
+int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ if (asyw->set.ntfy) {
+ return wndw->func->ntfy_wait_begun(disp->sync,
+ asyw->ntfy.offset,
+ wndw->wndw.base.device);
+ }
+ return 0;
+}
+
+void
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
+ struct nv50_wndw_atom *asyw)
+{
+ union nv50_wndw_atom_mask clr = {
+ .mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
+ };
+ if (clr.sema ) wndw->func-> sema_clr(wndw);
+ if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
+ if (clr.xlut ) wndw->func-> xlut_clr(wndw);
+ if (clr.csc ) wndw->func-> csc_clr(wndw);
+ if (clr.image) wndw->func->image_clr(wndw);
+
+ interlock[wndw->interlock.type] |= wndw->interlock.data;
+}
+
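+/* Emit methods for each piece of window state flagged as needing an update.
+ * When the core channel is part of this interlock, the image is forced to
+ * a non-tearing present with a one-frame minimum interval.
+ */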
+void
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
+ struct nv50_wndw_atom *asyw)
+{
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
+ asyw->image.interval = 1;
+ }
+
+ if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+ if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+ if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+
+ if (asyw->set.xlut ) {
+ if (asyw->ilut) {
+ asyw->xlut.i.offset =
+ nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
+ asyw->ilut, asyw->xlut.i.load);
+ }
+ wndw->func->xlut_set(wndw, asyw);
+ }
+
+ if (asyw->set.csc ) wndw->func->csc_set (wndw, asyw);
+ if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
+ if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
+ if (asyw->set.point) {
+ if (asyw->set.point = false, asyw->set.mask)
+ interlock[wndw->interlock.type] |= wndw->interlock.data;
+ interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
+
+ wndw->immd->point(wndw, asyw);
+ wndw->immd->update(wndw, interlock);
+ } else {
+ interlock[wndw->interlock.type] |= wndw->interlock.data;
+ }
+}
+
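+/* Arm the completion notifier for the next update, and toggle between the
+ * two notifier slots so consecutive updates don't overwrite each other.
+ */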
+void
+nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+
+ asyw->ntfy.handle = wndw->wndw.sync.handle;
+ asyw->ntfy.offset = wndw->ntfy;
+ asyw->ntfy.awaken = false;
+ asyw->set.ntfy = true;
+
+ wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
+ wndw->ntfy ^= 0x10;
+}
+
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+ wndw->func->release(wndw, asyw, asyh);
+ asyw->ntfy.handle = 0;
+ asyw->sema.handle = 0;
+ asyw->xlut.handle = 0;
+ memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
+}
+
+static int
+nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
+{
+ switch (asyw->state.fb->format->format) {
+ case DRM_FORMAT_YUYV:
+ asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8;
+ break;
+ case DRM_FORMAT_UYVY:
+ asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601;
+ return 0;
+}
+
+static int
+nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
+{
+ switch (asyw->state.fb->format->format) {
+ case DRM_FORMAT_C8:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_I8;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8;
+ break;
+ case DRM_FORMAT_RGB565:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ asyw->image.format = NVC37E_SET_PARAMS_FORMAT_A2R10G10B10;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB;
+ return 0;
+}
+
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
+ struct nv50_wndw_atom *armw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ struct drm_framebuffer *fb = asyw->state.fb;
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ uint8_t kind;
+ uint32_t tile_mode;
+ int ret;
+
+ NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+
+ if (fb != armw->state.fb || !armw->visible || modeset) {
+ nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+ asyw->image.w = fb->width;
+ asyw->image.h = fb->height;
+ asyw->image.kind = kind;
+
+ ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
+ if (ret) {
+ ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
+ if (ret)
+ return ret;
+ }
+
+ if (asyw->image.kind) {
+ asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR;
+ if (drm->client.device.info.chipset >= 0xc0)
+ asyw->image.blockh = tile_mode >> 4;
+ else
+ asyw->image.blockh = tile_mode;
+ asyw->image.blocks[0] = fb->pitches[0] / 64;
+ asyw->image.pitch[0] = 0;
+ } else {
+ asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+ asyw->image.blockh = NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
+ asyw->image.blocks[0] = 0;
+ asyw->image.pitch[0] = fb->pitches[0];
+ }
+
+ if (!asyh->state.async_flip)
+ asyw->image.interval = 1;
+ else
+ asyw->image.interval = 0;
+
+ if (asyw->image.interval)
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
+ else
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE;
+
+ asyw->set.image = wndw->func->image_set != NULL;
+ }
+
+ if (wndw->func->scale_set) {
+ asyw->scale.sx = asyw->state.src_x >> 16;
+ asyw->scale.sy = asyw->state.src_y >> 16;
+ asyw->scale.sw = asyw->state.src_w >> 16;
+ asyw->scale.sh = asyw->state.src_h >> 16;
+ asyw->scale.dw = asyw->state.crtc_w;
+ asyw->scale.dh = asyw->state.crtc_h;
+ if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
+ asyw->set.scale = true;
+ }
+
+ if (wndw->func->blend_set) {
+ asyw->blend.depth = 255 - asyw->state.normalized_zpos;
+ asyw->blend.k1 = asyw->state.alpha >> 8;
+ switch (asyw->state.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PREMULTI:
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
+ break;
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ default:
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1;
+ break;
+ }
+ if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
+ asyw->set.blend = true;
+ }
+
+ if (wndw->immd) {
+ asyw->point.x = asyw->state.crtc_x;
+ asyw->point.y = asyw->state.crtc_y;
+ if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+ asyw->set.point = true;
+ }
+
+ return wndw->func->acquire(wndw, asyw, asyh);
+}
+
+static int
+nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *armw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ struct drm_property_blob *ilut = asyh->state.degamma_lut;
+
+ /* I8 format without an input LUT makes no sense, and the
+ * HW error-checks for this.
+ *
+ * In order to handle legacy gamma, when there's no input
+ * LUT we need to steal the output LUT and use it instead.
+ */
+ if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
+		/* This should be an error, but there are legacy clients
+		 * that do a modeset before providing a gamma table.
+ *
+ * We keep the window disabled to avoid angering HW.
+ */
+ if (!(ilut = asyh->state.gamma_lut)) {
+ asyw->visible = false;
+ return 0;
+ }
+
+ if (wndw->func->ilut)
+ asyh->wndw.olut |= BIT(wndw->id);
+ } else {
+ asyh->wndw.olut &= ~BIT(wndw->id);
+ }
+
+ if (!ilut && wndw->func->ilut_identity &&
+ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
+ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
+ static struct drm_property_blob dummy = {};
+ ilut = &dummy;
+ }
+
+ /* Recalculate LUT state. */
+ memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
+ if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
+ wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut));
+ asyw->xlut.handle = wndw->wndw.vram.handle;
+ asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
+ asyw->set.xlut = true;
+ } else {
+ asyw->clr.xlut = armw->xlut.handle != 0;
+ }
+
+ /* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
+ if (wndw->func->olut_core &&
+ (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
+ asyw->set.xlut = true;
+
+ if (wndw->func->csc && asyh->state.ctm) {
+ const struct drm_color_ctm *ctm = asyh->state.ctm->data;
+ wndw->func->csc(wndw, asyw, ctm);
+ asyw->csc.valid = true;
+ asyw->set.csc = true;
+ } else {
+ asyw->csc.valid = false;
+ asyw->clr.csc = armw->csc.valid;
+ }
+
+ /* Can't do an immediate flip while changing the LUT. */
+ asyh->state.async_flip = false;
+ return 0;
+}
+
+static int
+nv50_wndw_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
+ struct nv50_head_atom *harm = NULL, *asyh = NULL;
+ bool modeset = false;
+ int ret;
+
+ NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+
+ /* Fetch the assembly state for the head the window will belong to,
+ * and determine whether the window will be visible.
+ */
+ if (asyw->state.crtc) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+ modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
+ asyw->visible = asyh->state.active;
+ } else {
+ asyw->visible = false;
+ }
+
+ /* Fetch assembly state for the head the window used to belong to. */
+ if (armw->state.crtc) {
+ harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+ if (IS_ERR(harm))
+ return PTR_ERR(harm);
+ }
+
+ /* LUT configuration can potentially cause the window to be disabled. */
+ if (asyw->visible && wndw->func->xlut_set &&
+ (!armw->visible ||
+ asyh->state.color_mgmt_changed ||
+ asyw->state.fb->format->format !=
+ armw->state.fb->format->format)) {
+ ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
+
+ /* Calculate new window state. */
+ if (asyw->visible) {
+ ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
+ armw, asyw, asyh);
+ if (ret)
+ return ret;
+
+ asyh->wndw.mask |= BIT(wndw->id);
+ } else
+ if (armw->visible) {
+ nv50_wndw_atomic_check_release(wndw, asyw, harm);
+ harm->wndw.mask &= ~BIT(wndw->id);
+ } else {
+ return 0;
+ }
+
+ /* Aside from the obvious case where the window is actively being
+ * disabled, we might also need to temporarily disable the window
+ * when performing certain modeset operations.
+ */
+ if (!asyw->visible || modeset) {
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ asyw->clr.xlut = armw->xlut.handle != 0;
+ if (asyw->clr.xlut && asyw->visible)
+ asyw->set.xlut = asyw->xlut.handle != 0;
+ asyw->clr.csc = armw->csc.valid;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle[0] != 0;
+ }
+
+ return 0;
+}
+
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
+{
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nouveau_bo *nvbo;
+
+ NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+ if (!old_state->fb)
+ return;
+
+ nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
+}
+
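+/* Pin the framebuffer's backing object in VRAM, hook up the per-kind DMA
+ * context where the hardware needs one, attach implicit fences via the GEM
+ * helper, and let the window implementation do any extra preparation.
+ */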
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nouveau_bo *nvbo;
+ struct nv50_head_atom *asyh;
+ struct nv50_wndw_ctxdma *ctxdma;
+ int ret;
+
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
+ if (!asyw->state.fb)
+ return 0;
+
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
+ if (ret)
+ return ret;
+
+ if (wndw->ctxdma.parent) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
+ }
+
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
+
+ asyw->image.offset[0] = nvbo->offset;
+
+ if (wndw->func->prepare) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
+ wndw->func->prepare(wndw, asyh, asyw);
+ }
+
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ __drm_atomic_helper_plane_destroy_state(&asyw->state);
+ kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw_atom *asyw;
+ if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+ asyw->sema = armw->sema;
+ asyw->ntfy = armw->ntfy;
+ asyw->ilut = NULL;
+ asyw->xlut = armw->xlut;
+ asyw->csc = armw->csc;
+ asyw->image = armw->image;
+ asyw->point = armw->point;
+ asyw->clr.mask = 0;
+ asyw->set.mask = 0;
+ return &asyw->state;
+}
+
+static int
+nv50_wndw_zpos_default(struct drm_plane *plane)
+{
+ return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
+ (plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *asyw;
+
+ if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+ return;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ __drm_atomic_helper_plane_reset(plane, &asyw->state);
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
+
+ list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
+ nv50_wndw_ctxdma_del(ctxdma);
+ }
+
+ nv50_dmac_destroy(&wndw->wimm);
+ nv50_dmac_destroy(&wndw->wndw);
+
+ nv50_lut_fini(&wndw->ilut);
+
+ drm_plane_cleanup(&wndw->plane);
+ kfree(wndw);
+}
+
+/* This function assumes the format has already been validated against the plane
+ * and that the modifier was validated against the device-wide modifier list at
+ * FB creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ uint8_t i;
+
+ if (drm->client.device.info.chipset < 0xc0) {
+ const struct drm_format_info *info = drm_format_info(format);
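+		/* Bits 12..19 of an NVIDIA block-linear format modifier encode the page kind. */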
+ const uint8_t kind = (modifier >> 12) & 0xff;
+
+		if (!format)
+			return false;
+
+		for (i = 0; i < info->num_planes; i++) {
+			if (info->cpp[i] != 4 && kind != 0x70)
+				return false;
+		}
+ }
+
+ return true;
+}
+
+const struct drm_plane_funcs
+nv50_wndw = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = nv50_wndw_destroy,
+ .reset = nv50_wndw_reset,
+ .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+ .format_mod_supported = nv50_plane_format_mod_supported,
+};
+
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+int
+nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
+ enum drm_plane_type type, const char *name, int index,
+ const u32 *format, u32 heads,
+ enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
+ struct nv50_wndw **pwndw)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
+ int nformat;
+ int ret;
+
+ if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
+ return -ENOMEM;
+ wndw->func = func;
+ wndw->id = index;
+ wndw->interlock.type = interlock_type;
+ wndw->interlock.data = interlock_data;
+
+ wndw->ctxdma.parent = &wndw->wndw.base.user;
+ INIT_LIST_HEAD(&wndw->ctxdma.list);
+
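+	/* Count the entries in the zero-terminated format list. */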
+ for (nformat = 0; format[nformat]; nformat++);
+
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
+ if (ret) {
+ kfree(*pwndw);
+ *pwndw = NULL;
+ return ret;
+ }
+
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
+
+ if (wndw->func->ilut) {
+ ret = nv50_lut_init(disp, mmu, &wndw->ilut);
+ if (ret)
+ return ret;
+ }
+
+ if (wndw->func->blend_set) {
+ ret = drm_plane_create_zpos_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane), 0, 254);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_alpha_property(&wndw->plane);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_blend_mode_property(&wndw->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ if (ret)
+ return ret;
+ } else {
+ ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ struct nv50_wndw **pwndw)
+{
+ struct {
+ s32 oclass;
+ int version;
+ int (*new)(struct nouveau_drm *, enum drm_plane_type,
+ int, s32, struct nv50_wndw **);
+ } wndws[] = {
+ { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
+ { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
+ { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
+ {}
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ int cid, ret;
+
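+	/* nvif_mclass() returns the index of the first class in the list that the display object supports. */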
+ cid = nvif_mclass(&disp->disp->object, wndws);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported window class\n");
+ return cid;
+ }
+
+ ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
+ if (ret)
+ return ret;
+
+ return nv50_wimm_init(drm, *pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
new file mode 100644
index 0000000000..76a6ae5d56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -0,0 +1,139 @@
+#ifndef __NV50_KMS_WNDW_H__
+#define __NV50_KMS_WNDW_H__
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+#include "disp.h"
+#include "atom.h"
+#include "lut.h"
+
+struct nv50_wndw_ctxdma {
+ struct list_head head;
+ struct nvif_object object;
+};
+
+struct nv50_wndw {
+ const struct nv50_wndw_func *func;
+ const struct nv50_wimm_func *immd;
+ int id;
+ struct nv50_disp_interlock interlock;
+
+ struct {
+ struct nvif_object *parent;
+ struct list_head list;
+ } ctxdma;
+
+ struct drm_plane plane;
+
+ struct nv50_lut ilut;
+
+ struct nv50_dmac wndw;
+ struct nv50_dmac wimm;
+
+ u16 ntfy;
+ u16 sema;
+ u32 data;
+};
+
+int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
+ enum drm_plane_type, const char *name, int index,
+ const u32 *format, u32 heads,
+ enum nv50_disp_interlock_type, u32 interlock_data,
+ struct nv50_wndw **);
+void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
+ struct nv50_wndw_atom *);
+void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
+ struct nv50_wndw_atom *);
+void nv50_wndw_ntfy_enable(struct nv50_wndw *, struct nv50_wndw_atom *);
+int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+struct nv50_wndw_func {
+ int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw);
+
+ int (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*sema_clr)(struct nv50_wndw *);
+ void (*ntfy_reset)(struct nouveau_bo *, u32 offset);
+ int (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
+ struct nvif_device *);
+ void (*ilut)(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyh, int size);
+ void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+ int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*csc_clr)(struct nv50_wndw *);
+ bool ilut_identity;
+ int ilut_size;
+ bool olut_core;
+ int (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*xlut_clr)(struct nv50_wndw *);
+ int (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*image_clr)(struct nv50_wndw *);
+ int (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ int (*update)(struct nv50_wndw *, u32 *interlock);
+};
+
+extern const struct drm_plane_funcs nv50_wndw;
+
+void base507c_ntfy_reset(struct nouveau_bo *, u32);
+int base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_ntfy_clr(struct nv50_wndw *);
+int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+int base507c_image_clr(struct nv50_wndw *);
+int base507c_update(struct nv50_wndw *, u32 *);
+
+void base907c_csc(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+
+struct nv50_wimm_func {
+ int (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ int (*update)(struct nv50_wndw *, u32 *interlock);
+};
+
+extern const struct nv50_wimm_func curs507a;
+bool curs507a_space(struct nv50_wndw *);
+
+static inline __must_check int
+nvif_chan_wait(struct nv50_dmac *dmac, u32 size)
+{
+ struct nv50_wndw *wndw = container_of(dmac, typeof(*wndw), wimm);
+ return curs507a_space(wndw) ? 0 : -ETIMEDOUT;
+}
+
+int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
+ enum drm_plane_type type, int index, s32 oclass, u32 heads,
+ struct nv50_wndw **);
+int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+int wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_sema_clr(struct nv50_wndw *);
+int wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_ntfy_clr(struct nv50_wndw *);
+int wndwc37e_image_clr(struct nv50_wndw *);
+int wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_update(struct nv50_wndw *, u32 *);
+
+int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+void wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+
+int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
+ struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
new file mode 100644
index 0000000000..b3deea5aca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/if0014.h>
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37e.h>
+
+static int
+wndwc37e_csc_clr(struct nv50_wndw *wndw)
+{
+ return 0;
+}
+
+static int
+wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CSC_RED2RED, asyw->csc.matrix, 12);
+ return 0;
+}
+
+static int
+wndwc37e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_INPUT_LUT, 0x00000000);
+ return 0;
+}
+
+static int
+wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTROL_INPUT_LUT,
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, OUTPUT_MODE, asyw->xlut.i.output_mode) |
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, RANGE, asyw->xlut.i.range) |
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, SIZE, asyw->xlut.i.size),
+
+ SET_OFFSET_INPUT_LUT, asyw->xlut.i.offset >> 8,
+ SET_CONTEXT_DMA_INPUT_LUT, asyw->xlut.handle);
+ return 0;
+}
+
+static void
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
+{
+ asyw->xlut.i.size = size == 1024 ? NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 :
+ NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257;
+ asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
+ asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE;
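+	/* ILUT entries here share the 907d core-channel OLUT layout, so that load helper is reused. */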
+ asyw->xlut.i.load = head907d_olut_load;
+}
+
+int
+wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 8)))
+ return ret;
+
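+	/* Color keying is disabled; the key min/max values below are full-range defaults. */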
+ PUSH_MTHD(push, NVC37E, SET_COMPOSITION_CONTROL,
+ NVDEF(NVC37E, SET_COMPOSITION_CONTROL, COLOR_KEY_SELECT, DISABLE) |
+ NVVAL(NVC37E, SET_COMPOSITION_CONTROL, DEPTH, asyw->blend.depth),
+
+ SET_COMPOSITION_CONSTANT_ALPHA,
+ NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K1, asyw->blend.k1) |
+ NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K2, 0),
+
+ SET_COMPOSITION_FACTOR_SELECT,
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_MATCH_SELECT,
+ asyw->blend.src_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_NO_MATCH_SELECT,
+ asyw->blend.src_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_MATCH_SELECT,
+ asyw->blend.dst_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_NO_MATCH_SELECT,
+ asyw->blend.dst_color),
+
+ SET_KEY_ALPHA,
+ NVVAL(NVC37E, SET_KEY_ALPHA, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_ALPHA, MAX, 0xffff),
+
+ SET_KEY_RED_CR,
+ NVVAL(NVC37E, SET_KEY_RED_CR, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_RED_CR, MAX, 0xffff),
+
+ SET_KEY_GREEN_Y,
+ NVVAL(NVC37E, SET_KEY_GREEN_Y, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_GREEN_Y, MAX, 0xffff),
+
+ SET_KEY_BLUE_CB,
+ NVVAL(NVC37E, SET_KEY_BLUE_CB, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_BLUE_CB, MAX, 0xffff));
+ return 0;
+}
+
+int
+wndwc37e_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), 0x00000000);
+ return 0;
+}
+
+static int
+wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC37E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE,
+ NVVAL(NVC37E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC37E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC37E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NVC37E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SET_PARAMS,
+ NVVAL(NVC37E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NVC37E, SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+ NVDEF(NVC37E, SET_PARAMS, INPUT_RANGE, BYPASS) |
+ NVDEF(NVC37E, SET_PARAMS, UNDERREPLICATE, DISABLE) |
+ NVDEF(NVC37E, SET_PARAMS, DE_GAMMA, NONE) |
+ NVVAL(NVC37E, SET_PARAMS, CSC, asyw->csc.valid) |
+ NVDEF(NVC37E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC37E, SET_PARAMS, SWAP_UV, DISABLE),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC37E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC37E, SET_POINT_IN(0),
+ NVVAL(NVC37E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC37E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE_IN,
+ NVVAL(NVC37E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC37E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE_OUT,
+ NVVAL(NVC37E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC37E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+int
+wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+ return 0;
+}
+
+int
+wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle,
+
+ SET_NOTIFIER_CONTROL,
+ NVVAL(NVC37E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+ NVVAL(NVC37E, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 4));
+ return 0;
+}
+
+int
+wndwc37e_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+ return 0;
+}
+
+int
+wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+ SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+ SET_SEMAPHORE_RELEASE, asyw->sema.release,
+ SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+ return 0;
+}
+
+int
+wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
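+	/* Core interlock occupies bit 0; the per-head cursor interlock bits start at bit 1. */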
+ PUSH_MTHD(push, NVC37E, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
+ interlock[NV50_DISP_INTERLOCK_CORE],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVC37E, UPDATE, 0x00000001 |
+ NVVAL(NVC37E, UPDATE, INTERLOCK_WITH_WIN_IMM,
+ !!(interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)));
+
+ return PUSH_KICK(push);
+}
+
+void
+wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+}
+
+int
+wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static const u32
+wndwc37e_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ 0
+};
+
+static const struct nv50_wndw_func
+wndwc37e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc37e_ilut,
+ .ilut_size = 1024,
+ .xlut_set = wndwc37e_ilut_set,
+ .xlut_clr = wndwc37e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc37e_csc_set,
+ .csc_clr = wndwc37e_csc_clr,
+ .image_set = wndwc37e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
+ enum drm_plane_type type, int index, s32 oclass, u32 heads,
+ struct nv50_wndw **pwndw)
+{
+ struct nvif_disp_chan_v0 args = {
+ .id = index,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_wndw *wndw;
+ int ret;
+
+ ret = nv50_wndw_new_(func, drm->dev, type, "wndw", index,
+ wndwc37e_format, heads, NV50_DISP_INTERLOCK_WNDW,
+ BIT(index), &wndw);
+ if (*pwndw = wndw, ret)
+ return ret;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ &oclass, 0, &args, sizeof(args),
+ disp->sync->offset, &wndw->wndw);
+ if (ret) {
+		NV_ERROR(drm, "wndw%04x allocation failed: %d\n", oclass, ret);
+ return ret;
+ }
+
+ wndw->ntfy = NV50_DISP_WNDW_NTFY(wndw->id);
+ wndw->sema = NV50_DISP_WNDW_SEM0(wndw->id);
+ wndw->data = 0x00000000;
+ return 0;
+}
+
+int
+wndwc37e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
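+	/* Windows are statically paired with heads: windows 2n and 2n+1 can only be assigned to head n. */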
+ return wndwc37e_new_(&wndwc37e, drm, type, index, oclass,
+ BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
new file mode 100644
index 0000000000..1d214a4b96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NVC57E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+int
+wndwc57e_csc_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ const u32 identity[12] = {
+ 0x00010000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00010000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00010000, 0x00000000,
+ };
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 1 + ARRAY_SIZE(identity))))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, identity, ARRAY_SIZE(identity));
+ return 0;
+}
+
+int
+wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, asyw->csc.matrix, 12);
+ return 0;
+}
+
+int
+wndwc57e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ILUT, 0x00000000);
+ return 0;
+}
+
+int
+wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_ILUT_CONTROL,
+ NVVAL(NVC57E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVC57E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVC57E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode),
+
+ SET_CONTEXT_DMA_ILUT, asyw->xlut.handle,
+ SET_OFFSET_ILUT, asyw->xlut.i.offset >> 8);
+ return 0;
+}
+
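+/* Convert an unsigned 0.16 fixed-point LUT component to half-float, truncating
+ * the mantissa; e.g. 0x8000 (0.5) becomes 0x3800 and 0xffff becomes 0x3bff.
+ */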
+static u16
+fixedU0_16_FP16(u16 fixed)
+{
+ int sign = 0, exp = 0, man = 0;
+ if (fixed) {
+ while (--exp && !(fixed & 0x8000))
+ fixed <<= 1;
+ man = ((fixed << 1) & 0xffc0) >> 6;
+ exp += 15;
+ }
+ return (sign << 15) | (exp << 10) | man;
+}
+
+static void
+wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ u16 r = fixedU0_16_FP16(drm_color_lut_extract(in-> red, 16));
+ u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
+ u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
+ writew(r, mem + 0);
+ writew(g, mem + 2);
+ writew(b, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+void
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
+{
+ if (!size)
+ size = 1024;
+
+ if (size == 256)
+ asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8;
+ else
+ asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10;
+
+ asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
+ asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE;
+ asyw->xlut.i.load = wndwc57e_ilut_load;
+}
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwc57e_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const struct nv50_wndw_func
+wndwc57e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc57e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
+ BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 0000000000..7a370fa1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc67e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}