Diffstat (limited to 'drivers/gpu/drm/vkms')
 drivers/gpu/drm/vkms/Makefile         |  11
 drivers/gpu/drm/vkms/vkms_composer.c  | 316
 drivers/gpu/drm/vkms/vkms_crtc.c      | 294
 drivers/gpu/drm/vkms/vkms_drv.c       | 220
 drivers/gpu/drm/vkms/vkms_drv.h       | 155
 drivers/gpu/drm/vkms/vkms_gem.c       | 248
 drivers/gpu/drm/vkms/vkms_output.c    | 108
 drivers/gpu/drm/vkms/vkms_plane.c     | 215
 drivers/gpu/drm/vkms/vkms_writeback.c | 142
 9 files changed, 1709 insertions(+), 0 deletions(-)
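Most of the new code sits in vkms_composer.c, which blends the cursor plane over the primary using pre-multiplied alpha and replaces the division by 255 with a shift trick. The standalone userspace check below is an illustrative sketch, not part of the patch; blend_channel_fast() mirrors the arithmetic of blend_channel() in the diff, and the loop bound src <= alpha encodes the pre-multiplied-alpha invariant under which the shortcut matches exact integer division.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors blend_channel() from vkms_composer.c below. */
static uint8_t blend_channel_fast(uint8_t src, uint8_t dst, uint8_t alpha)
{
	uint32_t pre_blend = src * 255 + dst * (255 - alpha);

	/* Shift-based division by 255; exact for pre_blend <= 65535. */
	return (pre_blend + ((pre_blend + 257) >> 8)) >> 8;
}

int main(void)
{
	/*
	 * With pre-multiplied alpha a source channel never exceeds its
	 * alpha, so pre_blend <= 255 * 255 and the shortcut agrees with
	 * exact integer division for every reachable input.
	 */
	for (int alpha = 0; alpha <= 255; alpha++)
		for (int src = 0; src <= alpha; src++)
			for (int dst = 0; dst <= 255; dst++) {
				uint32_t pre = src * 255 + dst * (255 - alpha);

				assert(blend_channel_fast(src, dst, alpha) == pre / 255);
			}
	printf("fast div-by-255 verified for all pre-multiplied inputs\n");
	return 0;
}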
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile new file mode 100644 index 000000000..333d3cead --- /dev/null +++ b/drivers/gpu/drm/vkms/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +vkms-y := \ + vkms_drv.o \ + vkms_plane.o \ + vkms_output.o \ + vkms_crtc.o \ + vkms_gem.o \ + vkms_composer.o \ + vkms_writeback.o + +obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c new file mode 100644 index 000000000..33c031f27 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/crc32.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_vblank.h> + +#include "vkms_drv.h" + +static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer, + const struct vkms_composer *composer) +{ + u32 pixel; + int src_offset = composer->offset + (y * composer->pitch) + + (x * composer->cpp); + + pixel = *(u32 *)&buffer[src_offset]; + + return pixel; +} + +/** + * compute_crc - Compute CRC value on output frame + * + * @vaddr: address to final framebuffer + * @composer: framebuffer's metadata + * + * returns CRC value computed using crc32 on the visible portion of + * the final framebuffer at vaddr_out + */ +static uint32_t compute_crc(const u8 *vaddr, + const struct vkms_composer *composer) +{ + int x, y; + u32 crc = 0, pixel = 0; + int x_src = composer->src.x1 >> 16; + int y_src = composer->src.y1 >> 16; + int h_src = drm_rect_height(&composer->src) >> 16; + int w_src = drm_rect_width(&composer->src) >> 16; + + for (y = y_src; y < y_src + h_src; ++y) { + for (x = x_src; x < x_src + w_src; ++x) { + pixel = get_pixel_from_buffer(x, y, vaddr, composer); + crc = crc32_le(crc, (void *)&pixel, sizeof(u32)); + } + } + + return crc; +} + +static u8 blend_channel(u8 src, u8 dst, u8 alpha) +{ + u32 pre_blend; + u8 new_color; + + pre_blend = (src * 255 + dst * (255 - alpha)); + + /* Faster div by 255 */ + new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8); + + return new_color; +} + +static void alpha_blending(const u8 *argb_src, u8 *argb_dst) +{ + u8 alpha; + + alpha = argb_src[3]; + argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha); + argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha); + argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha); + /* Opaque primary */ + argb_dst[3] = 0xFF; +} + +/** + * blend - blend value at vaddr_src with value at vaddr_dst + * @vaddr_dst: destination address + * @vaddr_src: source address + * @dst_composer: destination framebuffer's metadata + * @src_composer: source framebuffer's metadata + * + * Blend the vaddr_src value with the vaddr_dst value using the pre-multiplied + * alpha blending equation, since DRM currently assumes that the pixel color + * values have already been pre-multiplied with the alpha channel values. See + * more drm_plane_create_blend_mode_property(). This function uses buffer's + * metadata to locate the new composite values at vaddr_dst. 
+ */ +static void blend(void *vaddr_dst, void *vaddr_src, + struct vkms_composer *dst_composer, + struct vkms_composer *src_composer) +{ + int i, j, j_dst, i_dst; + int offset_src, offset_dst; + u8 *pixel_dst, *pixel_src; + + int x_src = src_composer->src.x1 >> 16; + int y_src = src_composer->src.y1 >> 16; + + int x_dst = src_composer->dst.x1; + int y_dst = src_composer->dst.y1; + int h_dst = drm_rect_height(&src_composer->dst); + int w_dst = drm_rect_width(&src_composer->dst); + + int y_limit = y_src + h_dst; + int x_limit = x_src + w_dst; + + for (i = y_src, i_dst = y_dst; i < y_limit; ++i) { + for (j = x_src, j_dst = x_dst; j < x_limit; ++j) { + offset_dst = dst_composer->offset + + (i_dst * dst_composer->pitch) + + (j_dst++ * dst_composer->cpp); + offset_src = src_composer->offset + + (i * src_composer->pitch) + + (j * src_composer->cpp); + + pixel_src = (u8 *)(vaddr_src + offset_src); + pixel_dst = (u8 *)(vaddr_dst + offset_dst); + alpha_blending(pixel_src, pixel_dst); + } + i_dst++; + } +} + +static void compose_cursor(struct vkms_composer *cursor_composer, + struct vkms_composer *primary_composer, + void *vaddr_out) +{ + struct drm_gem_object *cursor_obj; + struct vkms_gem_object *cursor_vkms_obj; + + cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0); + cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj); + + if (WARN_ON(!cursor_vkms_obj->vaddr)) + return; + + blend(vaddr_out, cursor_vkms_obj->vaddr, + primary_composer, cursor_composer); +} + +static int compose_planes(void **vaddr_out, + struct vkms_composer *primary_composer, + struct vkms_composer *cursor_composer) +{ + struct drm_framebuffer *fb = &primary_composer->fb; + struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); + + if (!*vaddr_out) { + *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL); + if (!*vaddr_out) { + DRM_ERROR("Cannot allocate memory for output frame."); + return -ENOMEM; + } + } + + if (WARN_ON(!vkms_obj->vaddr)) + return -EINVAL; + + memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size); + + if (cursor_composer) + compose_cursor(cursor_composer, primary_composer, *vaddr_out); + + return 0; +} + +/** + * vkms_composer_worker - ordered work_struct to compute CRC + * + * @work: work_struct + * + * Work handler for composing and computing CRCs. work_struct scheduled in + * an ordered workqueue that's periodically scheduled to run by + * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state(). + */ +void vkms_composer_worker(struct work_struct *work) +{ + struct vkms_crtc_state *crtc_state = container_of(work, + struct vkms_crtc_state, + composer_work); + struct drm_crtc *crtc = crtc_state->base.crtc; + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + struct vkms_composer *primary_composer = NULL; + struct vkms_composer *cursor_composer = NULL; + bool crc_pending, wb_pending; + void *vaddr_out = NULL; + u32 crc32 = 0; + u64 frame_start, frame_end; + int ret; + + spin_lock_irq(&out->composer_lock); + frame_start = crtc_state->frame_start; + frame_end = crtc_state->frame_end; + crc_pending = crtc_state->crc_pending; + wb_pending = crtc_state->wb_pending; + crtc_state->frame_start = 0; + crtc_state->frame_end = 0; + crtc_state->crc_pending = false; + spin_unlock_irq(&out->composer_lock); + + /* + * We raced with the vblank hrtimer and previous work already computed + * the crc, nothing to do. 
+ */ + if (!crc_pending) + return; + + if (crtc_state->num_active_planes >= 1) + primary_composer = crtc_state->active_planes[0]->composer; + + if (crtc_state->num_active_planes == 2) + cursor_composer = crtc_state->active_planes[1]->composer; + + if (!primary_composer) + return; + + if (wb_pending) + vaddr_out = crtc_state->active_writeback; + + ret = compose_planes(&vaddr_out, primary_composer, cursor_composer); + if (ret) { + if (ret == -EINVAL && !wb_pending) + kfree(vaddr_out); + return; + } + + crc32 = compute_crc(vaddr_out, primary_composer); + + if (wb_pending) { + drm_writeback_signal_completion(&out->wb_connector, 0); + spin_lock_irq(&out->composer_lock); + crtc_state->wb_pending = false; + spin_unlock_irq(&out->composer_lock); + } else { + kfree(vaddr_out); + } + + /* + * The worker can fall behind the vblank hrtimer, make sure we catch up. + */ + while (frame_start <= frame_end) + drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32); +} + +static const char * const pipe_crc_sources[] = {"auto"}; + +const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, + size_t *count) +{ + *count = ARRAY_SIZE(pipe_crc_sources); + return pipe_crc_sources; +} + +static int vkms_crc_parse_source(const char *src_name, bool *enabled) +{ + int ret = 0; + + if (!src_name) { + *enabled = false; + } else if (strcmp(src_name, "auto") == 0) { + *enabled = true; + } else { + *enabled = false; + ret = -EINVAL; + } + + return ret; +} + +int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name, + size_t *values_cnt) +{ + bool enabled; + + if (vkms_crc_parse_source(src_name, &enabled) < 0) { + DRM_DEBUG_DRIVER("unknown source %s\n", src_name); + return -EINVAL; + } + + *values_cnt = 1; + + return 0; +} + +void vkms_set_composer(struct vkms_output *out, bool enabled) +{ + bool old_enabled; + + if (enabled) + drm_crtc_vblank_get(&out->crtc); + + spin_lock_irq(&out->lock); + old_enabled = out->composer_enabled; + out->composer_enabled = enabled; + spin_unlock_irq(&out->lock); + + if (old_enabled) + drm_crtc_vblank_put(&out->crtc); +} + +int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + bool enabled = false; + int ret = 0; + + ret = vkms_crc_parse_source(src_name, &enabled); + + vkms_set_composer(out, enabled); + + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c new file mode 100644 index 000000000..1ae5cd47d --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "vkms_drv.h" + +static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) +{ + struct vkms_output *output = container_of(timer, struct vkms_output, + vblank_hrtimer); + struct drm_crtc *crtc = &output->crtc; + struct vkms_crtc_state *state; + u64 ret_overrun; + bool ret; + + ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + output->period_ns); + if (ret_overrun != 1) + pr_warn("%s: vblank timer overrun\n", __func__); + + spin_lock(&output->lock); + ret = drm_crtc_handle_vblank(crtc); + if (!ret) + DRM_ERROR("vkms failure on handling vblank"); + + state = output->composer_state; + spin_unlock(&output->lock); + + if (state && output->composer_enabled) { + u64 frame = drm_crtc_accurate_vblank_count(crtc); + + /* update frame_start only if a queued vkms_composer_worker() + * has read 
the data + */ + spin_lock(&output->composer_lock); + if (!state->crc_pending) + state->frame_start = frame; + else + DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n", + state->frame_start, frame); + state->frame_end = frame; + state->crc_pending = true; + spin_unlock(&output->composer_lock); + + ret = queue_work(output->composer_workq, &state->composer_work); + if (!ret) + DRM_DEBUG_DRIVER("Composer worker already queued\n"); + } + + return HRTIMER_RESTART; +} + +static int vkms_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + drm_calc_timestamping_constants(crtc, &crtc->mode); + + hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + out->vblank_hrtimer.function = &vkms_vblank_simulate; + out->period_ns = ktime_set(0, vblank->framedur_ns); + hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); + + return 0; +} + +static void vkms_disable_vblank(struct drm_crtc *crtc) +{ + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + hrtimer_cancel(&out->vblank_hrtimer); +} + +static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, ktime_t *vblank_time, + bool in_vblank_irq) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + struct vkms_output *output = &vkmsdev->output; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + + if (!READ_ONCE(vblank->enabled)) { + *vblank_time = ktime_get(); + return true; + } + + *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); + + if (WARN_ON(*vblank_time == vblank->time)) + return true; + + /* + * To prevent races we roll the hrtimer forward before we do any + * interrupt processing - this is how real hw works (the interrupt is + * only generated after all the vblank registers are updated) and what + * the vblank core expects. Therefore we need to always correct the + * timestamp by one frame. 
+ */ + *vblank_time -= output->period_ns; + + return true; +} + +static struct drm_crtc_state * +vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct vkms_crtc_state *vkms_state; + + if (WARN_ON(!crtc->state)) + return NULL; + + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); + if (!vkms_state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base); + + INIT_WORK(&vkms_state->composer_work, vkms_composer_worker); + + return &vkms_state->base; +} + +static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + + WARN_ON(work_pending(&vkms_state->composer_work)); + kfree(vkms_state->active_planes); + kfree(vkms_state); +} + +static void vkms_atomic_crtc_reset(struct drm_crtc *crtc) +{ + struct vkms_crtc_state *vkms_state = + kzalloc(sizeof(*vkms_state), GFP_KERNEL); + + if (crtc->state) + vkms_atomic_crtc_destroy_state(crtc, crtc->state); + + __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base); + if (vkms_state) + INIT_WORK(&vkms_state->composer_work, vkms_composer_worker); +} + +static const struct drm_crtc_funcs vkms_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = vkms_atomic_crtc_reset, + .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state, + .atomic_destroy_state = vkms_atomic_crtc_destroy_state, + .enable_vblank = vkms_enable_vblank, + .disable_vblank = vkms_disable_vblank, + .get_vblank_timestamp = vkms_get_vblank_timestamp, + .get_crc_sources = vkms_get_crc_sources, + .set_crc_source = vkms_set_crc_source, + .verify_crc_source = vkms_verify_crc_source, +}; + +static int vkms_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state); + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int i = 0, ret; + + if (vkms_state->active_planes) + return 0; + + ret = drm_atomic_add_affected_planes(state->state, crtc); + if (ret < 0) + return ret; + + drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(state->state, + plane); + WARN_ON(!plane_state); + + if (!plane_state->visible) + continue; + + i++; + } + + vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL); + if (!vkms_state->active_planes) + return -ENOMEM; + vkms_state->num_active_planes = i; + + i = 0; + drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(state->state, + plane); + + if (!plane_state->visible) + continue; + + vkms_state->active_planes[i++] = + to_vkms_plane_state(plane_state); + } + + return 0; +} + +static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + drm_crtc_vblank_on(crtc); +} + +static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + drm_crtc_vblank_off(crtc); +} + +static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); + + /* This lock is held across the atomic commit to block vblank timer + * from scheduling vkms_composer_worker until the composer is updated + */ + spin_lock_irq(&vkms_output->lock); +} + +static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, + 
struct drm_crtc_state *old_crtc_state) +{ + struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); + + if (crtc->state->event) { + spin_lock(&crtc->dev->event_lock); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + else + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + + spin_unlock(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } + + vkms_output->composer_state = to_vkms_crtc_state(crtc->state); + + spin_unlock_irq(&vkms_output->lock); +} + +static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { + .atomic_check = vkms_crtc_atomic_check, + .atomic_begin = vkms_crtc_atomic_begin, + .atomic_flush = vkms_crtc_atomic_flush, + .atomic_enable = vkms_crtc_atomic_enable, + .atomic_disable = vkms_crtc_atomic_disable, +}; + +int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, struct drm_plane *cursor) +{ + struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc); + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &vkms_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("Failed to init CRTC\n"); + return ret; + } + + drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs); + + spin_lock_init(&vkms_out->lock); + spin_lock_init(&vkms_out->composer_lock); + + vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0); + if (!vkms_out->composer_workq) + return -ENOMEM; + + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c new file mode 100644 index 000000000..838428988 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/** + * DOC: vkms (Virtual Kernel Modesetting) + * + * VKMS is a software-only model of a KMS driver that is useful for testing + * and for running X (or similar) on headless machines. VKMS aims to enable + * a virtual display with no need of a hardware display capability, releasing + * the GPU in DRM API tests. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> + +#include <drm/drm_gem.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_file.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "vkms_drv.h" + +#define DRIVER_NAME "vkms" +#define DRIVER_DESC "Virtual Kernel Mode Setting" +#define DRIVER_DATE "20180514" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static struct vkms_device *vkms_device; + +bool enable_cursor = true; +module_param_named(enable_cursor, enable_cursor, bool, 0444); +MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support"); + +static const struct file_operations vkms_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .mmap = drm_gem_mmap, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .release = drm_release, +}; + +static const struct vm_operations_struct vkms_gem_vm_ops = { + .fault = vkms_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static void vkms_release(struct drm_device *dev) +{ + struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); + + if (vkms->output.composer_workq) + destroy_workqueue(vkms->output.composer_workq); +} + +static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) +{ + struct drm_device *dev = old_state->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int i; + + drm_atomic_helper_commit_modeset_disables(dev, old_state); + + drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_modeset_enables(dev, old_state); + + drm_atomic_helper_fake_vblank(old_state); + + drm_atomic_helper_commit_hw_done(old_state); + + drm_atomic_helper_wait_for_flip_done(dev, old_state); + + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + struct vkms_crtc_state *vkms_state = + to_vkms_crtc_state(old_crtc_state); + + flush_work(&vkms_state->composer_work); + } + + drm_atomic_helper_cleanup_planes(dev, old_state); +} + +static struct drm_driver vkms_driver = { + .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, + .release = vkms_release, + .fops = &vkms_driver_fops, + .dumb_create = vkms_dumb_create, + .gem_vm_ops = &vkms_gem_vm_ops, + .gem_free_object_unlocked = vkms_gem_free_object, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = vkms_prime_import_sg_table, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +static const struct drm_mode_config_funcs vkms_mode_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const struct drm_mode_config_helper_funcs vkms_mode_config_helpers = { + .atomic_commit_tail = vkms_atomic_commit_tail, +}; + +static int vkms_modeset_init(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + + drm_mode_config_init(dev); + dev->mode_config.funcs = &vkms_mode_funcs; + dev->mode_config.min_width = XRES_MIN; + dev->mode_config.min_height = YRES_MIN; + dev->mode_config.max_width = XRES_MAX; + dev->mode_config.max_height = YRES_MAX; + dev->mode_config.cursor_width = 512; + dev->mode_config.cursor_height = 512; + 
dev->mode_config.preferred_depth = 24; + dev->mode_config.helper_private = &vkms_mode_config_helpers; + + return vkms_output_init(vkmsdev, 0); +} + +static int __init vkms_init(void) +{ + int ret; + struct platform_device *pdev; + + pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; + } + + vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver, + struct vkms_device, drm); + if (IS_ERR(vkms_device)) { + ret = PTR_ERR(vkms_device); + goto out_devres; + } + vkms_device->platform = pdev; + + ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, + DMA_BIT_MASK(64)); + + if (ret) { + DRM_ERROR("Could not initialize DMA support\n"); + goto out_devres; + } + + vkms_device->drm.irq_enabled = true; + + ret = drm_vblank_init(&vkms_device->drm, 1); + if (ret) { + DRM_ERROR("Failed to vblank\n"); + goto out_devres; + } + + ret = vkms_modeset_init(vkms_device); + if (ret) + goto out_devres; + + ret = drm_dev_register(&vkms_device->drm, 0); + if (ret) + goto out_devres; + + return 0; + +out_devres: + devres_release_group(&pdev->dev, NULL); +out_unregister: + platform_device_unregister(pdev); + return ret; +} + +static void __exit vkms_exit(void) +{ + struct platform_device *pdev; + + if (!vkms_device) { + DRM_INFO("vkms_device is NULL.\n"); + return; + } + + pdev = vkms_device->platform; + + drm_dev_unregister(&vkms_device->drm); + drm_atomic_helper_shutdown(&vkms_device->drm); + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); +} + +module_init(vkms_init); +module_exit(vkms_exit); + +MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>"); +MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h new file mode 100644 index 000000000..380a8f27e --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_DRV_H_ +#define _VKMS_DRV_H_ + +#include <linux/hrtimer.h> + +#include <drm/drm.h> +#include <drm/drm_gem.h> +#include <drm/drm_encoder.h> +#include <drm/drm_writeback.h> + +#define XRES_MIN 20 +#define YRES_MIN 20 + +#define XRES_DEF 1024 +#define YRES_DEF 768 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + +extern bool enable_cursor; + +struct vkms_composer { + struct drm_framebuffer fb; + struct drm_rect src, dst; + unsigned int offset; + unsigned int pitch; + unsigned int cpp; +}; + +/** + * vkms_plane_state - Driver specific plane state + * @base: base plane state + * @composer: data required for composing computation + */ +struct vkms_plane_state { + struct drm_plane_state base; + struct vkms_composer *composer; +}; + +/** + * vkms_crtc_state - Driver specific CRTC state + * @base: base CRTC state + * @composer_work: work struct to compose and add CRC entries + * @n_frame_start: start frame number for computed CRC + * @n_frame_end: end frame number for computed CRC + */ +struct vkms_crtc_state { + struct drm_crtc_state base; + struct work_struct composer_work; + + int num_active_planes; + /* stack of active planes for crc computation, should be in z order */ + struct vkms_plane_state **active_planes; + void *active_writeback; + + /* below four are protected by vkms_output.composer_lock */ + bool crc_pending; + bool wb_pending; + u64 frame_start; + u64 frame_end; +}; + +struct vkms_output { + 
struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + struct drm_writeback_connector wb_connector; + struct hrtimer vblank_hrtimer; + ktime_t period_ns; + struct drm_pending_vblank_event *event; + /* ordered wq for composer_work */ + struct workqueue_struct *composer_workq; + /* protects concurrent access to composer */ + spinlock_t lock; + + /* protected by @lock */ + bool composer_enabled; + struct vkms_crtc_state *composer_state; + + spinlock_t composer_lock; +}; + +struct vkms_device { + struct drm_device drm; + struct platform_device *platform; + struct vkms_output output; +}; + +struct vkms_gem_object { + struct drm_gem_object gem; + struct mutex pages_lock; /* Page lock used in page fault handler */ + struct page **pages; + unsigned int vmap_count; + void *vaddr; +}; + +#define drm_crtc_to_vkms_output(target) \ + container_of(target, struct vkms_output, crtc) + +#define drm_device_to_vkms_device(target) \ + container_of(target, struct vkms_device, drm) + +#define drm_gem_to_vkms_gem(target)\ + container_of(target, struct vkms_gem_object, gem) + +#define to_vkms_crtc_state(target)\ + container_of(target, struct vkms_crtc_state, base) + +#define to_vkms_plane_state(target)\ + container_of(target, struct vkms_plane_state, base) + +/* CRTC */ +int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, struct drm_plane *cursor); + +int vkms_output_init(struct vkms_device *vkmsdev, int index); + +struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, + enum drm_plane_type type, int index); + +/* Gem stuff */ +vm_fault_t vkms_gem_fault(struct vm_fault *vmf); + +int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +void vkms_gem_free_object(struct drm_gem_object *obj); + +int vkms_gem_vmap(struct drm_gem_object *obj); + +void vkms_gem_vunmap(struct drm_gem_object *obj); + +/* Prime */ +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg); + +/* CRC Support */ +const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, + size_t *count); +int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name); +int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name, + size_t *values_cnt); + +/* Composer Support */ +void vkms_composer_worker(struct work_struct *work); +void vkms_set_composer(struct vkms_output *out, bool enabled); + +/* Writeback */ +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev); + +#endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c new file mode 100644 index 000000000..a017fc599 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/dma-buf.h> +#include <linux/shmem_fs.h> +#include <linux/vmalloc.h> +#include <drm/drm_prime.h> + +#include "vkms_drv.h" + +static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev, + u64 size) +{ + struct vkms_gem_object *obj; + int ret; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + size = roundup(size, PAGE_SIZE); + ret = drm_gem_object_init(dev, &obj->gem, size); + if (ret) { + kfree(obj); + return ERR_PTR(ret); + } + + mutex_init(&obj->pages_lock); + + return obj; +} + +void vkms_gem_free_object(struct drm_gem_object *obj) +{ + struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object, + gem); + + 
WARN_ON(gem->pages); + WARN_ON(gem->vaddr); + + mutex_destroy(&gem->pages_lock); + drm_gem_object_release(obj); + kfree(gem); +} + +vm_fault_t vkms_gem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vkms_gem_object *obj = vma->vm_private_data; + unsigned long vaddr = vmf->address; + pgoff_t page_offset; + loff_t num_pages; + vm_fault_t ret = VM_FAULT_SIGBUS; + + page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT; + num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE); + + if (page_offset > num_pages) + return VM_FAULT_SIGBUS; + + mutex_lock(&obj->pages_lock); + if (obj->pages) { + get_page(obj->pages[page_offset]); + vmf->page = obj->pages[page_offset]; + ret = 0; + } + mutex_unlock(&obj->pages_lock); + if (ret) { + struct page *page; + struct address_space *mapping; + + mapping = file_inode(obj->gem.filp)->i_mapping; + page = shmem_read_mapping_page(mapping, page_offset); + + if (!IS_ERR(page)) { + vmf->page = page; + ret = 0; + } else { + switch (PTR_ERR(page)) { + case -ENOSPC: + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + case -EBUSY: + ret = VM_FAULT_RETRY; + break; + case -EFAULT: + case -EINVAL: + ret = VM_FAULT_SIGBUS; + break; + default: + WARN_ON(PTR_ERR(page)); + ret = VM_FAULT_SIGBUS; + break; + } + } + } + return ret; +} + +static struct drm_gem_object *vkms_gem_create(struct drm_device *dev, + struct drm_file *file, + u32 *handle, + u64 size) +{ + struct vkms_gem_object *obj; + int ret; + + if (!file || !dev || !handle) + return ERR_PTR(-EINVAL); + + obj = __vkms_gem_create(dev, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + ret = drm_gem_handle_create(file, &obj->gem, handle); + if (ret) + return ERR_PTR(ret); + + return &obj->gem; +} + +int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_object *gem_obj; + u64 pitch, size; + + if (!args || !dev || !file) + return -EINVAL; + + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + size = pitch * args->height; + + if (!size) + return -EINVAL; + + gem_obj = vkms_gem_create(dev, file, &args->handle, size); + if (IS_ERR(gem_obj)) + return PTR_ERR(gem_obj); + + args->size = gem_obj->size; + args->pitch = pitch; + + drm_gem_object_put(gem_obj); + + DRM_DEBUG_DRIVER("Created object of size %lld\n", size); + + return 0; +} + +static struct page **_get_pages(struct vkms_gem_object *vkms_obj) +{ + struct drm_gem_object *gem_obj = &vkms_obj->gem; + + if (!vkms_obj->pages) { + struct page **pages = drm_gem_get_pages(gem_obj); + + if (IS_ERR(pages)) + return pages; + + if (cmpxchg(&vkms_obj->pages, NULL, pages)) + drm_gem_put_pages(gem_obj, pages, false, true); + } + + return vkms_obj->pages; +} + +void vkms_gem_vunmap(struct drm_gem_object *obj) +{ + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); + + mutex_lock(&vkms_obj->pages_lock); + if (vkms_obj->vmap_count < 1) { + WARN_ON(vkms_obj->vaddr); + WARN_ON(vkms_obj->pages); + mutex_unlock(&vkms_obj->pages_lock); + return; + } + + vkms_obj->vmap_count--; + + if (vkms_obj->vmap_count == 0) { + vunmap(vkms_obj->vaddr); + vkms_obj->vaddr = NULL; + drm_gem_put_pages(obj, vkms_obj->pages, false, true); + vkms_obj->pages = NULL; + } + + mutex_unlock(&vkms_obj->pages_lock); +} + +int vkms_gem_vmap(struct drm_gem_object *obj) +{ + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); + int ret = 0; + + mutex_lock(&vkms_obj->pages_lock); + + if (!vkms_obj->vaddr) { + unsigned int n_pages = obj->size >> PAGE_SHIFT; + struct page **pages = _get_pages(vkms_obj); + + if 
(IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL); + if (!vkms_obj->vaddr) + goto err_vmap; + } + + vkms_obj->vmap_count++; + goto out; + +err_vmap: + ret = -ENOMEM; + drm_gem_put_pages(obj, vkms_obj->pages, false, true); + vkms_obj->pages = NULL; +out: + mutex_unlock(&vkms_obj->pages_lock); + return ret; +} + +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg) +{ + struct vkms_gem_object *obj; + int npages; + + obj = __vkms_gem_create(dev, attach->dmabuf->size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; + DRM_DEBUG_PRIME("Importing %d pages\n", npages); + + obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (!obj->pages) { + vkms_gem_free_object(&obj->gem); + return ERR_PTR(-ENOMEM); + } + + drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); + return &obj->gem; +} diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c new file mode 100644 index 000000000..4a1848b03 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "vkms_drv.h" +#include <drm/drm_atomic_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +static void vkms_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs vkms_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = vkms_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_conn_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + + return count; +} + +static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { + .get_modes = vkms_conn_get_modes, +}; + +int vkms_output_init(struct vkms_device *vkmsdev, int index) +{ + struct vkms_output *output = &vkmsdev->output; + struct drm_device *dev = &vkmsdev->drm; + struct drm_connector *connector = &output->connector; + struct drm_encoder *encoder = &output->encoder; + struct drm_crtc *crtc = &output->crtc; + struct drm_plane *primary, *cursor = NULL; + int ret; + + primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + if (enable_cursor) { + cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) { + ret = PTR_ERR(cursor); + goto err_cursor; + } + } + + ret = vkms_crtc_init(dev, crtc, primary, cursor); + if (ret) + goto err_crtc; + + ret = drm_connector_init(dev, connector, &vkms_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) { + DRM_ERROR("Failed to init connector\n"); + goto err_connector; + } + + drm_connector_helper_add(connector, &vkms_conn_helper_funcs); + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); + if (ret) { + DRM_ERROR("Failed to init encoder\n"); + goto err_encoder; + } + encoder->possible_crtcs = 1; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + goto err_attach; + } + + ret = 
vkms_enable_writeback_connector(vkmsdev); + if (ret) + DRM_ERROR("Failed to init writeback connector\n"); + + drm_mode_config_reset(dev); + + return 0; + +err_attach: + drm_encoder_cleanup(encoder); + +err_encoder: + drm_connector_cleanup(connector); + +err_connector: + drm_crtc_cleanup(crtc); + +err_crtc: + if (enable_cursor) + drm_plane_cleanup(cursor); + +err_cursor: + drm_plane_cleanup(primary); + + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c new file mode 100644 index 000000000..6d31265a2 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> + +#include "vkms_drv.h" + +static const u32 vkms_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const u32 vkms_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static struct drm_plane_state * +vkms_plane_duplicate_state(struct drm_plane *plane) +{ + struct vkms_plane_state *vkms_state; + struct vkms_composer *composer; + + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); + if (!vkms_state) + return NULL; + + composer = kzalloc(sizeof(*composer), GFP_KERNEL); + if (!composer) { + DRM_DEBUG_KMS("Couldn't allocate composer\n"); + kfree(vkms_state); + return NULL; + } + + vkms_state->composer = composer; + + __drm_atomic_helper_plane_duplicate_state(plane, + &vkms_state->base); + + return &vkms_state->base; +} + +static void vkms_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state); + struct drm_crtc *crtc = vkms_state->base.crtc; + + if (crtc) { + /* dropping the reference we acquired in + * vkms_primary_plane_update() + */ + if (drm_framebuffer_read_refcount(&vkms_state->composer->fb)) + drm_framebuffer_put(&vkms_state->composer->fb); + } + + kfree(vkms_state->composer); + vkms_state->composer = NULL; + + __drm_atomic_helper_plane_destroy_state(old_state); + kfree(vkms_state); +} + +static void vkms_plane_reset(struct drm_plane *plane) +{ + struct vkms_plane_state *vkms_state; + + if (plane->state) + vkms_plane_destroy_state(plane, plane->state); + + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); + if (!vkms_state) { + DRM_ERROR("Cannot allocate vkms_plane_state\n"); + return; + } + + plane->state = &vkms_state->base; + plane->state->plane = plane; +} + +static const struct drm_plane_funcs vkms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = vkms_plane_reset, + .atomic_duplicate_state = vkms_plane_duplicate_state, + .atomic_destroy_state = vkms_plane_destroy_state, +}; + +static void vkms_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vkms_plane_state *vkms_plane_state; + struct drm_framebuffer *fb = plane->state->fb; + struct vkms_composer *composer; + + if (!plane->state->crtc || !fb) + return; + + vkms_plane_state = to_vkms_plane_state(plane->state); + + composer = vkms_plane_state->composer; + memcpy(&composer->src, &plane->state->src, sizeof(struct drm_rect)); + memcpy(&composer->dst, &plane->state->dst, sizeof(struct drm_rect)); + memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer)); + drm_framebuffer_get(&composer->fb); + composer->offset = fb->offsets[0]; + composer->pitch = 
fb->pitches[0]; + composer->cpp = fb->format->cpp[0]; +} + +static int vkms_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + bool can_position = false; + int ret; + + if (!state->fb || WARN_ON(!state->crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + can_position = true; + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + can_position, true); + if (ret != 0) + return ret; + + /* for now primary plane must be visible and full screen */ + if (!state->visible && !can_position) + return -EINVAL; + + return 0; +} + +static int vkms_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_gem_object *gem_obj; + int ret; + + if (!state->fb) + return 0; + + gem_obj = drm_gem_fb_get_obj(state->fb, 0); + ret = vkms_gem_vmap(gem_obj); + if (ret) + DRM_ERROR("vmap failed: %d\n", ret); + + return drm_gem_fb_prepare_fb(plane, state); +} + +static void vkms_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_gem_object *gem_obj; + + if (!old_state->fb) + return; + + gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); + vkms_gem_vunmap(gem_obj); +} + +static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { + .atomic_update = vkms_plane_atomic_update, + .atomic_check = vkms_plane_atomic_check, + .prepare_fb = vkms_prepare_fb, + .cleanup_fb = vkms_cleanup_fb, +}; + +struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, + enum drm_plane_type type, int index) +{ + struct drm_device *dev = &vkmsdev->drm; + const struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane; + const u32 *formats; + int ret, nformats; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + if (type == DRM_PLANE_TYPE_CURSOR) { + formats = vkms_cursor_formats; + nformats = ARRAY_SIZE(vkms_cursor_formats); + funcs = &vkms_primary_helper_funcs; + } else { + formats = vkms_formats; + nformats = ARRAY_SIZE(vkms_formats); + funcs = &vkms_primary_helper_funcs; + } + + ret = drm_universal_plane_init(dev, plane, 1 << index, + &vkms_plane_funcs, + formats, nformats, + NULL, type, NULL); + if (ret) { + kfree(plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, funcs); + + return plane; +} diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c new file mode 100644 index 000000000..094fa4aa0 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "vkms_drv.h" +#include <drm/drm_fourcc.h> +#include <drm/drm_writeback.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> + +static const u32 vkms_wb_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const struct drm_connector_funcs vkms_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct 
drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + if (fb->format->format != vkms_wb_formats[0]) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->format->format, + &format_name)); + return -EINVAL; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = { + .atomic_check = vkms_wb_encoder_atomic_check, +}; + +static int vkms_wb_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct vkms_gem_object *vkms_obj; + struct drm_gem_object *gem_obj; + int ret; + + if (!job->fb) + return 0; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + ret = vkms_gem_vmap(gem_obj); + if (ret) { + DRM_ERROR("vmap failed: %d\n", ret); + return ret; + } + + vkms_obj = drm_gem_to_vkms_gem(gem_obj); + job->priv = vkms_obj->vaddr; + + return 0; +} + +static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct drm_gem_object *gem_obj; + struct vkms_device *vkmsdev; + + if (!job->fb) + return; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + vkms_gem_vunmap(gem_obj); + + vkmsdev = drm_device_to_vkms_device(gem_obj->dev); + vkms_set_composer(&vkmsdev->output, false); +} + +static void vkms_wb_atomic_commit(struct drm_connector *conn, + struct drm_connector_state *state) +{ + struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev); + struct vkms_output *output = &vkmsdev->output; + struct drm_writeback_connector *wb_conn = &output->wb_connector; + struct drm_connector_state *conn_state = wb_conn->base.state; + struct vkms_crtc_state *crtc_state = output->composer_state; + + if (!conn_state) + return; + + vkms_set_composer(&vkmsdev->output, true); + + spin_lock_irq(&output->composer_lock); + crtc_state->active_writeback = conn_state->writeback_job->priv; + crtc_state->wb_pending = true; + spin_unlock_irq(&output->composer_lock); + drm_writeback_queue_job(wb_conn, state); +} + +static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = { + .get_modes = vkms_wb_connector_get_modes, + .prepare_writeback_job = vkms_wb_prepare_job, + .cleanup_writeback_job = vkms_wb_cleanup_job, + .atomic_commit = vkms_wb_atomic_commit, +}; + +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev) +{ + struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector; + + vkmsdev->output.wb_connector.encoder.possible_crtcs = 1; + drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs); + + return drm_writeback_connector_init(&vkmsdev->drm, wb, + &vkms_wb_connector_funcs, + &vkms_wb_encoder_helper_funcs, + vkms_wb_formats, + ARRAY_SIZE(vkms_wb_formats)); +} |
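The CRC that compute_crc() exposes through the debugfs CRC interface can be reproduced in userspace when building test expectations. The sketch below is a model under stated assumptions, not driver code: it treats crc32_le() as a plain reflected CRC-32 with polynomial 0xEDB88320, the caller-supplied seed, and no final inversion, and it walks the visible rectangle with the same offset + y * pitch + x * cpp addressing as get_pixel_from_buffer(). Note the driver first converts the 16.16 fixed-point src rectangle with >> 16; the model takes integer coordinates directly. All names (crc32_le_model, compute_crc_model) are illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32; modelled on the kernel's crc32_le(). */
static uint32_t crc32_le_model(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

/* CRC the visible w x h rectangle at (x0, y0), one u32 pixel at a time. */
static uint32_t compute_crc_model(const uint8_t *vaddr, unsigned int offset,
				  unsigned int pitch, unsigned int cpp,
				  int x0, int y0, int w, int h)
{
	uint32_t crc = 0;

	for (int y = y0; y < y0 + h; y++) {
		for (int x = x0; x < x0 + w; x++) {
			uint32_t pixel;

			memcpy(&pixel, vaddr + offset + y * pitch + x * cpp,
			       sizeof(pixel));
			crc = crc32_le_model(crc, (const uint8_t *)&pixel,
					     sizeof(pixel));
		}
	}
	return crc;
}

int main(void)
{
	uint8_t fb[4 * 2 * 4] = { 0 };	/* 4x2 XRGB8888, pitch 16 */

	fb[4] = 0xff;			/* blue pixel at (1, 0) */
	printf("crc = 0x%08" PRIx32 "\n",
	       compute_crc_model(fb, 0, 16, 4, 1, 0, 2, 2));
	return 0;
}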
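One behavior worth spelling out: vkms_vblank_simulate() advances frame_end on every vblank, but the ordered workqueue may run vkms_composer_worker() less often than the timer fires. When the worker finally runs, its catch-up loop reports the single CRC it computed for every frame in [frame_start, frame_end], so consumers of the CRC interface see duplicated values rather than gaps. A minimal model of that loop follows; add_crc_entry_model() is a hypothetical stand-in for drm_crtc_add_crc_entry().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for drm_crtc_add_crc_entry(): one CRC row per frame number. */
static void add_crc_entry_model(uint64_t frame, uint32_t crc)
{
	printf("frame %" PRIu64 ": crc 0x%08" PRIx32 "\n", frame, crc);
}

int main(void)
{
	/* The vblank timer fired for frames 100..103 before the worker ran. */
	uint64_t frame_start = 100, frame_end = 103;
	uint32_t crc32 = 0xdeadbeef;	/* CRC of the one composition done */

	/* Same catch-up as the tail of vkms_composer_worker(). */
	while (frame_start <= frame_end)
		add_crc_entry_model(frame_start++, crc32);
	return 0;
}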