author    Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/vkms
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/vkms')
-rw-r--r--  drivers/gpu/drm/vkms/Makefile          |  11
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c   | 431
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c       | 304
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c        | 292
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h        | 173
-rw-r--r--  drivers/gpu/drm/vkms/vkms_formats.c    | 264
-rw-r--r--  drivers/gpu/drm/vkms/vkms_formats.h    |  12
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c     | 126
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c      | 219
-rw-r--r--  drivers/gpu/drm/vkms/vkms_writeback.c  | 178
10 files changed, 2010 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
new file mode 100644
index 000000000..1b28a6a32
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vkms-y := \
+ vkms_drv.o \
+ vkms_plane.o \
+ vkms_output.o \
+ vkms_formats.o \
+ vkms_crtc.o \
+ vkms_composer.o \
+ vkms_writeback.o
+
+obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
new file mode 100644
index 000000000..3c99fb8b5
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/crc32.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+#include <linux/minmax.h>
+
+#include "vkms_drv.h"
+
+static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
+{
+ u32 new_color;
+
+ new_color = (src * 0xffff + dst * (0xffff - alpha));
+
+ return DIV_ROUND_CLOSEST(new_color, 0xffff);
+}
+
+/**
+ * pre_mul_alpha_blend - alpha blending equation
+ * @frame_info: Source framebuffer's metadata
+ * @stage_buffer: The line with the pixels from src_plane
+ * @output_buffer: A line buffer that receives all the blends output
+ *
+ * Using the information from `frame_info`, this blends only the
+ * necessary pixels from the `stage_buffer` into the `output_buffer`
+ * using the premultiplied blend formula.
+ *
+ * The current DRM assumption is that pixel color values have already been
+ * pre-multiplied with the alpha channel values. See
+ * drm_plane_create_blend_mode_property() for more details. Also, this
+ * formula assumes a completely opaque background.
+ */
+static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
+ struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
+{
+ int x_dst = frame_info->dst.x1;
+ struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
+ struct pixel_argb_u16 *in = stage_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (int x = 0; x < x_limit; x++) {
+ out[x].a = (u16)0xffff;
+ out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
+ out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
+ out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
+ }
+}
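+
+/*
+ * Worked example (illustrative): blending a 50%-opaque, premultiplied red
+ * source pixel over an opaque blue background with the helpers above
+ * (out = src + dst * (1 - alpha), per channel):
+ *
+ *   in  = { .a = 0x8000, .r = 0x8000, .g = 0x0000, .b = 0x0000 }
+ *   out = { .a = 0xffff, .r = 0x0000, .g = 0x0000, .b = 0xffff }
+ *
+ *   out.r = pre_mul_blend_channel(0x8000, 0x0000, 0x8000) = 0x8000
+ *   out.b = pre_mul_blend_channel(0x0000, 0xffff, 0x8000) = 0x7fff
+ */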
+
+static int get_y_pos(struct vkms_frame_info *frame_info, int y)
+{
+ if (frame_info->rotation & DRM_MODE_REFLECT_Y)
+ return drm_rect_height(&frame_info->rotated) - y - 1;
+
+ switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ return frame_info->rotated.x2 - y - 1;
+ case DRM_MODE_ROTATE_270:
+ return y + frame_info->rotated.x1;
+ default:
+ return y;
+ }
+}
+
+static bool check_limit(struct vkms_frame_info *frame_info, int pos)
+{
+ if (drm_rotation_90_or_270(frame_info->rotation)) {
+ if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
+ return true;
+ } else {
+ if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
+ return true;
+ }
+
+ return false;
+}
+
+static void fill_background(const struct pixel_argb_u16 *background_color,
+ struct line_buffer *output_buffer)
+{
+ for (size_t i = 0; i < output_buffer->n_pixels; i++)
+ output_buffer->pixels[i] = *background_color;
+}
+
+// lerp(a, b, t) = a + (b - a) * t
+static u16 lerp_u16(u16 a, u16 b, s64 t)
+{
+ s64 a_fp = drm_int2fixp(a);
+ s64 b_fp = drm_int2fixp(b);
+
+ s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+
+ return drm_fixp2int(a_fp + delta);
+}
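+
+/*
+ * Worked example (illustrative): lerp_u16(0x0000, 0x0100, t) with t = 0.5
+ * expressed in the 32.32 DRM fixed-point format evaluates to
+ * 0x0000 + (0x0100 - 0x0000) * 0.5 = 0x0080.
+ */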
+
+static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
+{
+ s64 color_channel_fp = drm_int2fixp(channel_value);
+
+ return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
+}
+
+/*
+ * This enum is related to the positions of the variables inside
+ * `struct drm_color_lut`, so the order of both needs to be the same.
+ */
+enum lut_channel {
+ LUT_RED = 0,
+ LUT_GREEN,
+ LUT_BLUE,
+ LUT_RESERVED
+};
+
+static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
+ enum lut_channel channel)
+{
+ s64 lut_index = get_lut_index(lut, channel_value);
+
+ /*
+ * This checks if `struct drm_color_lut` has any gap added by the compiler
+ * between the struct fields.
+ */
+ static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
+
+ u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+ u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+
+ u16 floor_channel_value = floor_lut_value[channel];
+ u16 ceil_channel_value = ceil_lut_value[channel];
+
+ return lerp_u16(floor_channel_value, ceil_channel_value,
+ lut_index & DRM_FIXED_DECIMAL_MASK);
+}
+
+static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
+{
+ if (!crtc_state->gamma_lut.base)
+ return;
+
+ if (!crtc_state->gamma_lut.lut_length)
+ return;
+
+ for (size_t x = 0; x < output_buffer->n_pixels; x++) {
+ struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];
+
+ pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
+ pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
+ pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
+ }
+}
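+
+/*
+ * Worked example (illustrative): with a 256-entry gamma LUT,
+ * channel_value2index_ratio is 255 / 65535 in fixed point, so a channel
+ * value of 0x8000 maps to LUT index ~127.5 and the result is the value
+ * interpolated halfway between base[127] and base[128] for that channel.
+ */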
+
+/**
+ * blend - blend the pixels from all planes and compute crc
+ * @wb: The writeback frame buffer metadata
+ * @crtc_state: The crtc state
+ * @crc32: The crc output of the final frame
+ * @output_buffer: A buffer of a row that will receive the result of the blend(s)
+ * @stage_buffer: The line with the pixels from the plane being blended into the output
+ * @row_size: The size, in bytes, of a single row
+ *
+ * This function blends the pixels (using `pre_mul_alpha_blend`)
+ * from all planes, calculates the crc32 of the output from the former step,
+ * and, if necessary, converts and stores the output in the writeback buffer.
+ */
+static void blend(struct vkms_writeback_job *wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, size_t row_size)
+{
+ struct vkms_plane_state **plane = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
+ int y_pos;
+
+ const struct pixel_argb_u16 background_color = { .a = 0xffff };
+
+ size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;
+
+ for (size_t y = 0; y < crtc_y_limit; y++) {
+ fill_background(&background_color, output_buffer);
+
+ /* The active planes are composed associatively in z-order. */
+ for (size_t i = 0; i < n_active_planes; i++) {
+ y_pos = get_y_pos(plane[i]->frame_info, y);
+
+ if (!check_limit(plane[i]->frame_info, y_pos))
+ continue;
+
+ vkms_compose_row(stage_buffer, plane[i], y_pos);
+ pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
+ output_buffer);
+ }
+
+ apply_lut(crtc_state, output_buffer);
+
+ *crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
+
+ if (wb)
+ vkms_writeback_row(wb, output_buffer, y_pos);
+ }
+}
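+
+/*
+ * Summary of the loop above (illustrative): for each scanline of the CRTC
+ * mode, the output buffer is filled with the opaque background, every
+ * active plane contributes one row via vkms_compose_row() and
+ * pre_mul_alpha_blend() in z-order, the gamma LUT is applied, the row is
+ * folded into the running CRC, and, when a writeback job is pending, the
+ * row is also converted into the writeback framebuffer.
+ */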
+
+static int check_format_funcs(struct vkms_crtc_state *crtc_state,
+ struct vkms_writeback_job *active_wb)
+{
+ struct vkms_plane_state **planes = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
+
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (!planes[i]->pixel_read)
+ return -1;
+
+ if (active_wb && !active_wb->pixel_write)
+ return -1;
+
+ return 0;
+}
+
+static int check_iosys_map(struct vkms_crtc_state *crtc_state)
+{
+ struct vkms_plane_state **plane_state = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
+
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
+ return -1;
+
+ return 0;
+}
+
+static int compose_active_planes(struct vkms_writeback_job *active_wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32)
+{
+ size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
+ struct line_buffer output_buffer, stage_buffer;
+ int ret = 0;
+
+ /*
+ * This check exists so we can call `crc32_le` for the entire line
+ * instead of doing it for each channel of each pixel in case
+ * `struct pixel_argb_u16` had any gap added by the compiler
+ * between the struct fields.
+ */
+ static_assert(sizeof(struct pixel_argb_u16) == 8);
+
+ if (WARN_ON(check_iosys_map(crtc_state)))
+ return -EINVAL;
+
+ if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
+ return -EINVAL;
+
+ line_width = crtc_state->base.crtc->mode.hdisplay;
+ stage_buffer.n_pixels = line_width;
+ output_buffer.n_pixels = line_width;
+
+ stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!stage_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for the output line buffer");
+ return -ENOMEM;
+ }
+
+ output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!output_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for intermediate line buffer");
+ ret = -ENOMEM;
+ goto free_stage_buffer;
+ }
+
+ blend(active_wb, crtc_state, crc32, &stage_buffer,
+ &output_buffer, line_width * pixel_size);
+
+ kvfree(output_buffer.pixels);
+free_stage_buffer:
+ kvfree(stage_buffer.pixels);
+
+ return ret;
+}
+
+/**
+ * vkms_composer_worker - ordered work_struct to compute CRC
+ *
+ * @work: work_struct
+ *
+ * Work handler for composing and computing CRCs. work_struct scheduled in
+ * an ordered workqueue that's periodically scheduled to run by
+ * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
+ */
+void vkms_composer_worker(struct work_struct *work)
+{
+ struct vkms_crtc_state *crtc_state = container_of(work,
+ struct vkms_crtc_state,
+ composer_work);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ bool crc_pending, wb_pending;
+ u64 frame_start, frame_end;
+ u32 crc32 = 0;
+ int ret;
+
+ spin_lock_irq(&out->composer_lock);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
+ crc_pending = crtc_state->crc_pending;
+ wb_pending = crtc_state->wb_pending;
+ crtc_state->frame_start = 0;
+ crtc_state->frame_end = 0;
+ crtc_state->crc_pending = false;
+
+ if (crtc->state->gamma_lut) {
+ s64 max_lut_index_fp;
+ s64 u16_max_fp = drm_int2fixp(0xffff);
+
+ crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+ crtc_state->gamma_lut.lut_length =
+ crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
+ max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+ crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
+ u16_max_fp);
+
+ } else {
+ crtc_state->gamma_lut.base = NULL;
+ }
+
+ spin_unlock_irq(&out->composer_lock);
+
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+ * the crc, nothing to do.
+ */
+ if (!crc_pending)
+ return;
+
+ if (wb_pending)
+ ret = compose_active_planes(active_wb, crtc_state, &crc32);
+ else
+ ret = compose_active_planes(NULL, crtc_state, &crc32);
+
+ if (ret)
+ return;
+
+ if (wb_pending) {
+ drm_writeback_signal_completion(&out->wb_connector, 0);
+ spin_lock_irq(&out->composer_lock);
+ crtc_state->wb_pending = false;
+ spin_unlock_irq(&out->composer_lock);
+ }
+
+ /*
+ * The worker can fall behind the vblank hrtimer, make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+}
+
+static const char * const pipe_crc_sources[] = {"auto"};
+
+const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (vkms_crc_parse_source(src_name, &enabled) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
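+
+/*
+ * Example usage (assumed typical; the debugfs path depends on the DRM
+ * minor and CRTC index): userspace enables CRC generation by writing the
+ * source name validated above to the CRTC's crc control file and then
+ * reads one CRC entry per frame from the data file:
+ *
+ *   echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
+ *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data
+ */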
+
+void vkms_set_composer(struct vkms_output *out, bool enabled)
+{
+ bool old_enabled;
+
+ if (enabled)
+ drm_crtc_vblank_get(&out->crtc);
+
+ spin_lock_irq(&out->lock);
+ old_enabled = out->composer_enabled;
+ out->composer_enabled = enabled;
+ spin_unlock_irq(&out->lock);
+
+ if (old_enabled)
+ drm_crtc_vblank_put(&out->crtc);
+}
+
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ bool enabled = false;
+ int ret = 0;
+
+ ret = vkms_crc_parse_source(src_name, &enabled);
+
+ vkms_set_composer(out, enabled);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
new file mode 100644
index 000000000..61e500b8c
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/dma-fence.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ struct drm_crtc *crtc = &output->crtc;
+ struct vkms_crtc_state *state;
+ u64 ret_overrun;
+ bool ret, fence_cookie;
+
+ fence_cookie = dma_fence_begin_signalling();
+
+ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ output->period_ns);
+ if (ret_overrun != 1)
+ pr_warn("%s: vblank timer overrun\n", __func__);
+
+ spin_lock(&output->lock);
+ ret = drm_crtc_handle_vblank(crtc);
+ if (!ret)
+ DRM_ERROR("vkms failure on handling vblank");
+
+ state = output->composer_state;
+ spin_unlock(&output->lock);
+
+ if (state && output->composer_enabled) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ /* update frame_start only if a queued vkms_composer_worker()
+ * has read the data
+ */
+ spin_lock(&output->composer_lock);
+ if (!state->crc_pending)
+ state->frame_start = frame;
+ else
+ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ state->frame_start, frame);
+ state->frame_end = frame;
+ state->crc_pending = true;
+ spin_unlock(&output->composer_lock);
+
+ ret = queue_work(output->composer_workq, &state->composer_work);
+ if (!ret)
+ DRM_DEBUG_DRIVER("Composer worker already queued\n");
+ }
+
+ dma_fence_end_signalling(fence_cookie);
+
+ return HRTIMER_RESTART;
+}
+
+static int vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ out->period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
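+
+/*
+ * Worked example (illustrative): for a 60 Hz mode, vblank->framedur_ns is
+ * roughly 16666667 ns, so the hrtimer armed above fires about every
+ * 16.7 ms and each expiry simulates one vblank in vkms_vblank_simulate().
+ */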
+
+static void vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+ * the vblank core expects. Therefore we need to always correct the
+ * timestamp by one frame.
+ */
+ *vblank_time -= output->period_ns;
+
+ return true;
+}
+
+static struct drm_crtc_state *
+vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
+
+ return &vkms_state->base;
+}
+
+static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ WARN_ON(work_pending(&vkms_state->composer_work));
+ kfree(vkms_state->active_planes);
+ kfree(vkms_state);
+}
+
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state =
+ kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+
+ if (crtc->state)
+ vkms_atomic_crtc_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
+ if (vkms_state)
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
+}
+
+static const struct drm_crtc_funcs vkms_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = vkms_atomic_crtc_reset,
+ .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
+ .atomic_destroy_state = vkms_atomic_crtc_destroy_state,
+ .enable_vblank = vkms_enable_vblank,
+ .disable_vblank = vkms_disable_vblank,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .get_crc_sources = vkms_get_crc_sources,
+ .set_crc_source = vkms_set_crc_source,
+ .verify_crc_source = vkms_verify_crc_source,
+};
+
+static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i = 0, ret;
+
+ if (vkms_state->active_planes)
+ return 0;
+
+ ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
+ if (ret < 0)
+ return ret;
+
+ drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
+ plane);
+ WARN_ON(!plane_state);
+
+ if (!plane_state->visible)
+ continue;
+
+ i++;
+ }
+
+ vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ if (!vkms_state->active_planes)
+ return -ENOMEM;
+ vkms_state->num_active_planes = i;
+
+ i = 0;
+ drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
+ plane);
+
+ if (!plane_state->visible)
+ continue;
+
+ vkms_state->active_planes[i++] =
+ to_vkms_plane_state(plane_state);
+ }
+
+ return 0;
+}
+
+static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+ /* This lock is held across the atomic commit to block vblank timer
+ * from scheduling vkms_composer_worker until the composer is updated
+ */
+ spin_lock_irq(&vkms_output->lock);
+}
+
+static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+ if (crtc->state->event) {
+ spin_lock(&crtc->dev->event_lock);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+
+ vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
+
+ spin_unlock_irq(&vkms_output->lock);
+}
+
+static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_check = vkms_crtc_atomic_check,
+ .atomic_begin = vkms_crtc_atomic_begin,
+ .atomic_flush = vkms_crtc_atomic_flush,
+ .atomic_enable = vkms_crtc_atomic_enable,
+ .atomic_disable = vkms_crtc_atomic_disable,
+};
+
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
+ int ret;
+
+ ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
+
+ spin_lock_init(&vkms_out->lock);
+ spin_lock_init(&vkms_out->composer_lock);
+
+ vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+ if (!vkms_out->composer_workq)
+ return -ENOMEM;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
new file mode 100644
index 000000000..dd0af086e
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/**
+ * DOC: vkms (Virtual Kernel Modesetting)
+ *
+ * VKMS is a software-only model of a KMS driver that is useful for testing
+ * and for running X (or similar) on headless machines. VKMS aims to enable
+ * a virtual display with no need of a hardware display capability, releasing
+ * the GPU in DRM API tests.
+ */
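+
+/*
+ * Example usage (assumed typical invocation; the parameter names are the
+ * module parameters defined below): the virtual output can be configured
+ * at load time, e.g.:
+ *
+ *   modprobe vkms enable_cursor=1 enable_writeback=1 enable_overlay=1
+ */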
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+
+#define DRIVER_NAME "vkms"
+#define DRIVER_DESC "Virtual Kernel Mode Setting"
+#define DRIVER_DATE "20180514"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static struct vkms_config *default_config;
+
+static bool enable_cursor = true;
+module_param_named(enable_cursor, enable_cursor, bool, 0444);
+MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
+
+static bool enable_writeback = true;
+module_param_named(enable_writeback, enable_writeback, bool, 0444);
+MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+
+static bool enable_overlay;
+module_param_named(enable_overlay, enable_overlay, bool, 0444);
+MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");
+
+DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
+
+static void vkms_release(struct drm_device *dev)
+{
+ struct vkms_device *vkms = drm_device_to_vkms_device(dev);
+
+ if (vkms->output.composer_workq)
+ destroy_workqueue(vkms->output.composer_workq);
+}
+
+static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vkms_crtc_state *vkms_state =
+ to_vkms_crtc_state(old_crtc_state);
+
+ flush_work(&vkms_state->composer_work);
+ }
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+
+ seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
+ seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
+ seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+static const struct drm_driver vkms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
+ .release = vkms_release,
+ .fops = &vkms_driver_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int vkms_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->gamma_lut || !new_crtc_state->color_mgmt_changed)
+ continue;
+
+ if (new_crtc_state->gamma_lut->length / sizeof(struct drm_color_lut)
+ > VKMS_LUT_SIZE)
+ return -EINVAL;
+ }
+
+ return drm_atomic_helper_check(dev, state);
+}
+
+static const struct drm_mode_config_funcs vkms_mode_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = vkms_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs vkms_mode_config_helpers = {
+ .atomic_commit_tail = vkms_atomic_commit_tail,
+};
+
+static int vkms_modeset_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.funcs = &vkms_mode_funcs;
+ dev->mode_config.min_width = XRES_MIN;
+ dev->mode_config.min_height = YRES_MIN;
+ dev->mode_config.max_width = XRES_MAX;
+ dev->mode_config.max_height = YRES_MAX;
+ dev->mode_config.cursor_width = 512;
+ dev->mode_config.cursor_height = 512;
+ /* FIXME: There's a confusion between bpp and depth between this and
+ * fbdev helpers. We have to go with 0, meaning "pick the default",
+ * which is XRGB8888 in all cases. */
+ dev->mode_config.preferred_depth = 0;
+ dev->mode_config.helper_private = &vkms_mode_config_helpers;
+
+ return vkms_output_init(vkmsdev, 0);
+}
+
+static int vkms_create(struct vkms_config *config)
+{
+ int ret;
+ struct platform_device *pdev;
+ struct vkms_device *vkms_device;
+
+ pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
+ struct vkms_device, drm);
+ if (IS_ERR(vkms_device)) {
+ ret = PTR_ERR(vkms_device);
+ goto out_devres;
+ }
+ vkms_device->platform = pdev;
+ vkms_device->config = config;
+ config->dev = vkms_device;
+
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_devres;
+ }
+
+ ret = drm_vblank_init(&vkms_device->drm, 1);
+ if (ret) {
+ DRM_ERROR("Failed to vblank\n");
+ goto out_devres;
+ }
+
+ ret = vkms_modeset_init(vkms_device);
+ if (ret)
+ goto out_devres;
+
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+
+ ret = drm_dev_register(&vkms_device->drm, 0);
+ if (ret)
+ goto out_devres;
+
+ drm_fbdev_generic_setup(&vkms_device->drm, 0);
+
+ return 0;
+
+out_devres:
+ devres_release_group(&pdev->dev, NULL);
+out_unregister:
+ platform_device_unregister(pdev);
+ return ret;
+}
+
+static int __init vkms_init(void)
+{
+ int ret;
+ struct vkms_config *config;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ default_config = config;
+
+ config->cursor = enable_cursor;
+ config->writeback = enable_writeback;
+ config->overlay = enable_overlay;
+
+ ret = vkms_create(config);
+ if (ret)
+ kfree(config);
+
+ return ret;
+}
+
+static void vkms_destroy(struct vkms_config *config)
+{
+ struct platform_device *pdev;
+
+ if (!config->dev) {
+ DRM_INFO("vkms_device is NULL.\n");
+ return;
+ }
+
+ pdev = config->dev->platform;
+
+ drm_dev_unregister(&config->dev->drm);
+ drm_atomic_helper_shutdown(&config->dev->drm);
+ devres_release_group(&pdev->dev, NULL);
+ platform_device_unregister(pdev);
+
+ config->dev = NULL;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (default_config->dev)
+ vkms_destroy(default_config);
+
+ kfree(default_config);
+}
+
+module_init(vkms_init);
+module_exit(vkms_exit);
+
+MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>");
+MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
new file mode 100644
index 000000000..8f5710deb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_DRV_H_
+#define _VKMS_DRV_H_
+
+#include <linux/hrtimer.h>
+
+#include <drm/drm.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_writeback.h>
+
+#define XRES_MIN 10
+#define YRES_MIN 10
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+#define NUM_OVERLAY_PLANES 8
+
+#define VKMS_LUT_SIZE 256
+
+struct vkms_frame_info {
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+ struct drm_rect rotated;
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+ unsigned int rotation;
+ unsigned int offset;
+ unsigned int pitch;
+ unsigned int cpp;
+};
+
+struct pixel_argb_u16 {
+ u16 a, r, g, b;
+};
+
+struct line_buffer {
+ size_t n_pixels;
+ struct pixel_argb_u16 *pixels;
+};
+
+struct vkms_writeback_job {
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
+ struct vkms_frame_info wb_frame_info;
+ void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
+};
+
+/**
+ * vkms_plane_state - Driver specific plane state
+ * @base: base plane state
+ * @frame_info: data required for composing computation
+ */
+struct vkms_plane_state {
+ struct drm_shadow_plane_state base;
+ struct vkms_frame_info *frame_info;
+ void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+};
+
+struct vkms_plane {
+ struct drm_plane base;
+};
+
+struct vkms_color_lut {
+ struct drm_color_lut *base;
+ size_t lut_length;
+ s64 channel_value2index_ratio;
+};
+
+/**
+ * vkms_crtc_state - Driver specific CRTC state
+ * @base: base CRTC state
+ * @composer_work: work struct to compose and add CRC entries
+ * @frame_start: start frame number for computed CRC
+ * @frame_end: end frame number for computed CRC
+ */
+struct vkms_crtc_state {
+ struct drm_crtc_state base;
+ struct work_struct composer_work;
+
+ int num_active_planes;
+ /* stack of active planes for crc computation, should be in z order */
+ struct vkms_plane_state **active_planes;
+ struct vkms_writeback_job *active_writeback;
+ struct vkms_color_lut gamma_lut;
+
+ /* below four are protected by vkms_output.composer_lock */
+ bool crc_pending;
+ bool wb_pending;
+ u64 frame_start;
+ u64 frame_end;
+};
+
+struct vkms_output {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct drm_writeback_connector wb_connector;
+ struct hrtimer vblank_hrtimer;
+ ktime_t period_ns;
+ struct drm_pending_vblank_event *event;
+ /* ordered wq for composer_work */
+ struct workqueue_struct *composer_workq;
+ /* protects concurrent access to composer */
+ spinlock_t lock;
+
+ /* protected by @lock */
+ bool composer_enabled;
+ struct vkms_crtc_state *composer_state;
+
+ spinlock_t composer_lock;
+};
+
+struct vkms_device;
+
+struct vkms_config {
+ bool writeback;
+ bool cursor;
+ bool overlay;
+ /* only set when instantiated */
+ struct vkms_device *dev;
+};
+
+struct vkms_device {
+ struct drm_device drm;
+ struct platform_device *platform;
+ struct vkms_output output;
+ const struct vkms_config *config;
+};
+
+#define drm_crtc_to_vkms_output(target) \
+ container_of(target, struct vkms_output, crtc)
+
+#define drm_device_to_vkms_device(target) \
+ container_of(target, struct vkms_device, drm)
+
+#define to_vkms_crtc_state(target)\
+ container_of(target, struct vkms_crtc_state, base)
+
+#define to_vkms_plane_state(target)\
+ container_of(target, struct vkms_plane_state, base.base)
+
+/* CRTC */
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor);
+
+int vkms_output_init(struct vkms_device *vkmsdev, int index);
+
+struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type, int index);
+
+/* CRC Support */
+const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+
+/* Composer Support */
+void vkms_composer_worker(struct work_struct *work);
+void vkms_set_composer(struct vkms_output *out, bool enabled);
+void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
+void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
+
+/* Writeback */
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
+
+#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
new file mode 100644
index 000000000..36046b12f
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+
+#include <drm/drm_blend.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fixed.h>
+
+#include "vkms_formats.h"
+
+static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+{
+ return frame_info->offset + (y * frame_info->pitch)
+ + (x * frame_info->cpp);
+}
+
+/*
+ * packed_pixels_addr - Get the pointer to the pixel at a given pair of coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x(width) coordinate of the 2D buffer
+ * @y: The y(height) coordinate of the 2D buffer
+ *
+ * Takes the information stored in the frame_info, a pair of coordinates, and
+ * returns the address of the first color channel.
+ * This function assumes the channels are packed together, i.e. a color channel
+ * comes immediately after another in memory. Therefore, this function
+ * doesn't work for YUV formats with chroma subsampling (e.g. YUV420 and NV21).
+ */
+static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y)
+{
+ size_t offset = pixel_offset(frame_info, x, y);
+
+ return (u8 *)frame_info->map[0].vaddr + offset;
+}
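+
+/*
+ * Worked example (illustrative): for an XRGB8888 framebuffer with
+ * offset = 0, pitch = 4096 bytes and cpp = 4, the pixel at (x = 3, y = 2)
+ * starts 2 * 4096 + 3 * 4 = 8204 bytes into the mapped buffer.
+ */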
+
+static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+{
+ int x_src = frame_info->src.x1 >> 16;
+ int y_src = y - frame_info->rotated.y1 + (frame_info->src.y1 >> 16);
+
+ return packed_pixels_addr(frame_info, x_src, y_src);
+}
+
+static int get_x_position(const struct vkms_frame_info *frame_info, int limit, int x)
+{
+ if (frame_info->rotation & (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270))
+ return limit - x - 1;
+ return x;
+}
+
+static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+{
+ /*
+ * The 257 is the "conversion ratio". This number is obtained from the
+ * (2^16 - 1) / (2^8 - 1) division, which, in this case, spreads the
+ * 8-bit value across the full 16-bit range to get the best color value.
+ * A similar idea applies to other RGB color conversions.
+ */
+ out_pixel->a = (u16)src_pixels[3] * 257;
+ out_pixel->r = (u16)src_pixels[2] * 257;
+ out_pixel->g = (u16)src_pixels[1] * 257;
+ out_pixel->b = (u16)src_pixels[0] * 257;
+}
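+
+/*
+ * Worked example (illustrative): 0x00 * 257 = 0x0000 and 0xff * 257 = 0xffff,
+ * so the 8-bit extremes map exactly onto the 16-bit extremes, and a mid
+ * value such as 0x80 becomes 0x8080.
+ */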
+
+static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+{
+ out_pixel->a = (u16)0xffff;
+ out_pixel->r = (u16)src_pixels[2] * 257;
+ out_pixel->g = (u16)src_pixels[1] * 257;
+ out_pixel->b = (u16)src_pixels[0] * 257;
+}
+
+static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+{
+ u16 *pixels = (u16 *)src_pixels;
+
+ out_pixel->a = le16_to_cpu(pixels[3]);
+ out_pixel->r = le16_to_cpu(pixels[2]);
+ out_pixel->g = le16_to_cpu(pixels[1]);
+ out_pixel->b = le16_to_cpu(pixels[0]);
+}
+
+static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+{
+ u16 *pixels = (u16 *)src_pixels;
+
+ out_pixel->a = (u16)0xffff;
+ out_pixel->r = le16_to_cpu(pixels[2]);
+ out_pixel->g = le16_to_cpu(pixels[1]);
+ out_pixel->b = le16_to_cpu(pixels[0]);
+}
+
+static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+{
+ u16 *pixels = (u16 *)src_pixels;
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ u16 rgb_565 = le16_to_cpu(*pixels);
+ s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+ s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+ s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
+
+ out_pixel->a = (u16)0xffff;
+ out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+}
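+
+/*
+ * Worked example (illustrative): a 5-bit red value of 0x1f scales by
+ * 65535 / 31 to exactly 0xffff, and a 6-bit green value of 0x20 scales by
+ * 65535 / 63 to roughly 0x8208, so full-scale components remain full scale
+ * after the fixed-point multiply and round.
+ */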
+
+/**
+ * vkms_compose_row - compose a single row of a plane
+ * @stage_buffer: output line with the composed pixels
+ * @plane: state of the plane that is being composed
+ * @y: y coordinate of the row
+ *
+ * This function composes a single row of a plane. It gets the source pixels
+ * through the y coordinate (see get_packed_src_addr()) and goes linearly
+ * through the source pixels, reading and converting them to
+ * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270,
+ * the source pixels are not traversed linearly. The source pixels are queried
+ * on each iteration in order to traverse the pixels vertically.
+ */
+void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ struct vkms_frame_info *frame_info = plane->frame_info;
+ u8 *src_pixels = get_packed_src_addr(frame_info, y);
+ int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp) {
+ int x_pos = get_x_position(frame_info, limit, x);
+
+ if (drm_rotation_90_or_270(frame_info->rotation))
+ src_pixels = get_packed_src_addr(frame_info, x + frame_info->rotated.y1)
+ + frame_info->cpp * y;
+
+ plane->pixel_read(src_pixels, &out_pixels[x_pos]);
+ }
+}
+
+/*
+ * The following functions take a line of argb_u16 pixels from the
+ * src_buffer, convert them to a specific format, and store them in the
+ * destination.
+ *
+ * They are used in `compose_active_planes()` to convert and store a line
+ * from the src_buffer to the writeback buffer.
+ */
+static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+{
+ /*
+ * The sequence below is important because the format's byte order is
+ * little-endian. In the case of ARGB8888 the memory is
+ * organized this way:
+ *
+ * | Addr     | = blue channel
+ * | Addr + 1 | = green channel
+ * | Addr + 2 | = red channel
+ * | Addr + 3 | = alpha channel
+ */
+ dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+}
+
+static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+{
+ dst_pixels[3] = 0xff;
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+}
+
+static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+{
+ u16 *pixels = (u16 *)dst_pixels;
+
+ pixels[3] = cpu_to_le16(in_pixel->a);
+ pixels[2] = cpu_to_le16(in_pixel->r);
+ pixels[1] = cpu_to_le16(in_pixel->g);
+ pixels[0] = cpu_to_le16(in_pixel->b);
+}
+
+static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+{
+ u16 *pixels = (u16 *)dst_pixels;
+
+ pixels[3] = 0xffff;
+ pixels[2] = cpu_to_le16(in_pixel->r);
+ pixels[1] = cpu_to_le16(in_pixel->g);
+ pixels[0] = cpu_to_le16(in_pixel->b);
+}
+
+static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+{
+ u16 *pixels = (u16 *)dst_pixels;
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ s64 fp_r = drm_int2fixp(in_pixel->r);
+ s64 fp_g = drm_int2fixp(in_pixel->g);
+ s64 fp_b = drm_int2fixp(in_pixel->b);
+
+ u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+ u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+ u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
+
+ *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+}
+
+void vkms_writeback_row(struct vkms_writeback_job *wb,
+ const struct line_buffer *src_buffer, int y)
+{
+ struct vkms_frame_info *frame_info = &wb->wb_frame_info;
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+ wb->pixel_write(dst_pixels, &in_pixels[x]);
+}
+
+void *get_pixel_conversion_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &ARGB8888_to_argb_u16;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_to_argb_u16;
+ case DRM_FORMAT_ARGB16161616:
+ return &ARGB16161616_to_argb_u16;
+ case DRM_FORMAT_XRGB16161616:
+ return &XRGB16161616_to_argb_u16;
+ case DRM_FORMAT_RGB565:
+ return &RGB565_to_argb_u16;
+ default:
+ return NULL;
+ }
+}
+
+void *get_pixel_write_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &argb_u16_to_ARGB8888;
+ case DRM_FORMAT_XRGB8888:
+ return &argb_u16_to_XRGB8888;
+ case DRM_FORMAT_ARGB16161616:
+ return &argb_u16_to_ARGB16161616;
+ case DRM_FORMAT_XRGB16161616:
+ return &argb_u16_to_XRGB16161616;
+ case DRM_FORMAT_RGB565:
+ return &argb_u16_to_RGB565;
+ default:
+ return NULL;
+ }
+}
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
new file mode 100644
index 000000000..cf59c2ed8
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_FORMATS_H_
+#define _VKMS_FORMATS_H_
+
+#include "vkms_drv.h"
+
+void *get_pixel_conversion_function(u32 format);
+
+void *get_pixel_write_function(u32 format);
+
+#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
new file mode 100644
index 000000000..5ce70dd94
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "vkms_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs vkms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+};
+
+static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
+ struct drm_crtc *crtc)
+{
+ struct vkms_plane *overlay;
+
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
+ if (IS_ERR(overlay))
+ return PTR_ERR(overlay);
+
+ if (!overlay->base.possible_crtcs)
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
+int vkms_output_init(struct vkms_device *vkmsdev, int index)
+{
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_connector *connector = &output->connector;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_crtc *crtc = &output->crtc;
+ struct vkms_plane *primary, *cursor = NULL;
+ int ret;
+ int writeback;
+ unsigned int n;
+
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ if (vkmsdev->config->overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (vkmsdev->config->cursor) {
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor))
+ return PTR_ERR(cursor);
+ }
+
+ ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
+ if (ret)
+ return ret;
+
+ ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init connector\n");
+ goto err_connector;
+ }
+
+ drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+
+ ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err_encoder;
+ }
+ encoder->possible_crtcs = 1;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+
+ if (vkmsdev->config->writeback) {
+ writeback = vkms_enable_writeback_connector(vkmsdev);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+
+err_attach:
+ drm_encoder_cleanup(encoder);
+
+err_encoder:
+ drm_connector_cleanup(connector);
+
+err_connector:
+ drm_crtc_cleanup(crtc);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
new file mode 100644
index 000000000..e5c625ab8
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/iosys-map.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "vkms_drv.h"
+#include "vkms_formats.h"
+
+static const u32 vkms_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
+};
+
+static struct drm_plane_state *
+vkms_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+ struct vkms_frame_info *frame_info;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ frame_info = kzalloc(sizeof(*frame_info), GFP_KERNEL);
+ if (!frame_info) {
+ DRM_DEBUG_KMS("Couldn't allocate frame_info\n");
+ kfree(vkms_state);
+ return NULL;
+ }
+
+ vkms_state->frame_info = frame_info;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, &vkms_state->base);
+
+ return &vkms_state->base.base;
+}
+
+static void vkms_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
+ struct drm_crtc *crtc = vkms_state->base.base.crtc;
+
+ if (crtc && vkms_state->frame_info->fb) {
+ /* dropping the reference we acquired in
+ * vkms_plane_atomic_update()
+ */
+ if (drm_framebuffer_read_refcount(vkms_state->frame_info->fb))
+ drm_framebuffer_put(vkms_state->frame_info->fb);
+ }
+
+ kfree(vkms_state->frame_info);
+ vkms_state->frame_info = NULL;
+
+ __drm_gem_destroy_shadow_plane_state(&vkms_state->base);
+ kfree(vkms_state);
+}
+
+static void vkms_plane_reset(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+
+ if (plane->state) {
+ vkms_plane_destroy_state(plane, plane->state);
+ plane->state = NULL; /* must be set to NULL here */
+ }
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state) {
+ DRM_ERROR("Cannot allocate vkms_plane_state\n");
+ return;
+ }
+
+ __drm_gem_reset_shadow_plane(plane, &vkms_state->base);
+}
+
+static const struct drm_plane_funcs vkms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = vkms_plane_reset,
+ .atomic_duplicate_state = vkms_plane_duplicate_state,
+ .atomic_destroy_state = vkms_plane_destroy_state,
+};
+
+static void vkms_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct vkms_plane_state *vkms_plane_state;
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vkms_frame_info *frame_info;
+ u32 fmt;
+
+ if (!new_state->crtc || !fb)
+ return;
+
+ fmt = fb->format->format;
+ vkms_plane_state = to_vkms_plane_state(new_state);
+ shadow_plane_state = &vkms_plane_state->base;
+
+ frame_info = vkms_plane_state->frame_info;
+ memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
+ memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
+ memcpy(&frame_info->rotated, &new_state->dst, sizeof(struct drm_rect));
+ frame_info->fb = fb;
+ memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
+ drm_framebuffer_get(frame_info->fb);
+ frame_info->rotation = drm_rotation_simplify(new_state->rotation, DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ drm_rect_rotate(&frame_info->rotated, drm_rect_width(&frame_info->rotated),
+ drm_rect_height(&frame_info->rotated), frame_info->rotation);
+
+ frame_info->offset = fb->offsets[0];
+ frame_info->pitch = fb->pitches[0];
+ frame_info->cpp = fb->format->cpp[0];
+ vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+}
+
+static int vkms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+ int ret;
+
+ if (!fb)
+ return 0;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
+
+ return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+
+ if (!fb)
+ return;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ drm_gem_fb_vunmap(fb, shadow_plane_state->map);
+}
+
+static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
+ .atomic_update = vkms_plane_atomic_update,
+ .atomic_check = vkms_plane_atomic_check,
+ .prepare_fb = vkms_prepare_fb,
+ .cleanup_fb = vkms_cleanup_fb,
+};
+
+struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type, int index)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_plane *plane;
+
+ plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 1 << index,
+ &vkms_plane_funcs,
+ vkms_formats, ARRAY_SIZE(vkms_formats),
+ NULL, type, NULL);
+ if (IS_ERR(plane))
+ return plane;
+
+ drm_plane_helper_add(&plane->base, &vkms_plane_helper_funcs);
+
+ drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
new file mode 100644
index 000000000..d7e63aa14
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/iosys-map.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_writeback.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "vkms_drv.h"
+#include "vkms_formats.h"
+
+static const u32 vkms_wb_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
+};
+
+static const struct drm_connector_funcs vkms_wb_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ int ret;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
+ .atomic_check = vkms_wb_encoder_atomic_check,
+};
+
+static int vkms_wb_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ struct vkms_writeback_job *vkmsjob;
+ int ret;
+
+ if (!job->fb)
+ return 0;
+
+ vkmsjob = kzalloc(sizeof(*vkmsjob), GFP_KERNEL);
+ if (!vkmsjob)
+ return -ENOMEM;
+
+ ret = drm_gem_fb_vmap(job->fb, vkmsjob->wb_frame_info.map, vkmsjob->data);
+ if (ret) {
+ DRM_ERROR("vmap failed: %d\n", ret);
+ goto err_kfree;
+ }
+
+ vkmsjob->wb_frame_info.fb = job->fb;
+ drm_framebuffer_get(vkmsjob->wb_frame_info.fb);
+
+ job->priv = vkmsjob;
+
+ return 0;
+
+err_kfree:
+ kfree(vkmsjob);
+ return ret;
+}
+
+static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct vkms_writeback_job *vkmsjob = job->priv;
+ struct vkms_device *vkmsdev;
+
+ if (!job->fb)
+ return;
+
+ drm_gem_fb_vunmap(job->fb, vkmsjob->wb_frame_info.map);
+
+ drm_framebuffer_put(vkmsjob->wb_frame_info.fb);
+
+ vkmsdev = drm_device_to_vkms_device(job->fb->dev);
+ vkms_set_composer(&vkmsdev->output, false);
+ kfree(vkmsjob);
+}
+
+static void vkms_wb_atomic_commit(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ conn);
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_writeback_connector *wb_conn = &output->wb_connector;
+ struct drm_connector_state *conn_state = wb_conn->base.state;
+ struct vkms_crtc_state *crtc_state = output->composer_state;
+ struct drm_framebuffer *fb = connector_state->writeback_job->fb;
+ u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
+ u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
+ struct vkms_writeback_job *active_wb;
+ struct vkms_frame_info *wb_frame_info;
+ u32 wb_format = fb->format->format;
+
+ if (!conn_state)
+ return;
+
+ vkms_set_composer(&vkmsdev->output, true);
+
+ active_wb = conn_state->writeback_job->priv;
+ wb_frame_info = &active_wb->wb_frame_info;
+
+ spin_lock_irq(&output->composer_lock);
+ crtc_state->active_writeback = active_wb;
+ crtc_state->wb_pending = true;
+ spin_unlock_irq(&output->composer_lock);
+
+ wb_frame_info->offset = fb->offsets[0];
+ wb_frame_info->pitch = fb->pitches[0];
+ wb_frame_info->cpp = fb->format->cpp[0];
+
+ drm_writeback_queue_job(wb_conn, connector_state);
+ active_wb->pixel_write = get_pixel_write_function(wb_format);
+ drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
+ drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
+}
+
+static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
+ .get_modes = vkms_wb_connector_get_modes,
+ .prepare_writeback_job = vkms_wb_prepare_job,
+ .cleanup_writeback_job = vkms_wb_cleanup_job,
+ .atomic_commit = vkms_wb_atomic_commit,
+};
+
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
+{
+ struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;
+
+ drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
+
+ return drm_writeback_connector_init(&vkmsdev->drm, wb,
+ &vkms_wb_connector_funcs,
+ &vkms_wb_encoder_helper_funcs,
+ vkms_wb_formats,
+ ARRAY_SIZE(vkms_wb_formats),
+ 1);
+}