From 51de1d8436100f725f3576aefa24a2bd2057bc28 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Mon, 15 Apr 2024 22:36:56 +0200 Subject: Adding upstream version 0.37.0. Signed-off-by: Daniel Baumann --- video/out/hwdec/dmabuf_interop.h | 57 ++++ video/out/hwdec/dmabuf_interop_gl.c | 311 +++++++++++++++++ video/out/hwdec/dmabuf_interop_pl.c | 138 ++++++++ video/out/hwdec/dmabuf_interop_wl.c | 83 +++++ video/out/hwdec/hwdec_aimagereader.c | 402 ++++++++++++++++++++++ video/out/hwdec/hwdec_cuda.c | 286 ++++++++++++++++ video/out/hwdec/hwdec_cuda.h | 59 ++++ video/out/hwdec/hwdec_cuda_gl.c | 174 ++++++++++ video/out/hwdec/hwdec_cuda_vk.c | 344 +++++++++++++++++++ video/out/hwdec/hwdec_drmprime.c | 294 ++++++++++++++++ video/out/hwdec/hwdec_drmprime_overlay.c | 334 ++++++++++++++++++ video/out/hwdec/hwdec_ios_gl.m | 222 ++++++++++++ video/out/hwdec/hwdec_mac_gl.c | 169 ++++++++++ video/out/hwdec/hwdec_vaapi.c | 557 +++++++++++++++++++++++++++++++ video/out/hwdec/hwdec_vt.c | 141 ++++++++ video/out/hwdec/hwdec_vt.h | 63 ++++ video/out/hwdec/hwdec_vt_pl.m | 312 +++++++++++++++++ video/out/hwdec/hwdec_vulkan.c | 333 ++++++++++++++++++ 18 files changed, 4279 insertions(+) create mode 100644 video/out/hwdec/dmabuf_interop.h create mode 100644 video/out/hwdec/dmabuf_interop_gl.c create mode 100644 video/out/hwdec/dmabuf_interop_pl.c create mode 100644 video/out/hwdec/dmabuf_interop_wl.c create mode 100644 video/out/hwdec/hwdec_aimagereader.c create mode 100644 video/out/hwdec/hwdec_cuda.c create mode 100644 video/out/hwdec/hwdec_cuda.h create mode 100644 video/out/hwdec/hwdec_cuda_gl.c create mode 100644 video/out/hwdec/hwdec_cuda_vk.c create mode 100644 video/out/hwdec/hwdec_drmprime.c create mode 100644 video/out/hwdec/hwdec_drmprime_overlay.c create mode 100644 video/out/hwdec/hwdec_ios_gl.m create mode 100644 video/out/hwdec/hwdec_mac_gl.c create mode 100644 video/out/hwdec/hwdec_vaapi.c create mode 100644 video/out/hwdec/hwdec_vt.c create mode 100644 
video/out/hwdec/hwdec_vt.h create mode 100644 video/out/hwdec/hwdec_vt_pl.m create mode 100644 video/out/hwdec/hwdec_vulkan.c (limited to 'video/out/hwdec') diff --git a/video/out/hwdec/dmabuf_interop.h b/video/out/hwdec/dmabuf_interop.h new file mode 100644 index 0000000..e9b3e8e --- /dev/null +++ b/video/out/hwdec/dmabuf_interop.h @@ -0,0 +1,57 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#pragma once + +#include + +#include "video/out/gpu/hwdec.h" + +struct dmabuf_interop { + bool use_modifiers; + bool composed_layers; + + bool (*interop_init)(struct ra_hwdec_mapper *mapper, + const struct ra_imgfmt_desc *desc); + void (*interop_uninit)(const struct ra_hwdec_mapper *mapper); + + bool (*interop_map)(struct ra_hwdec_mapper *mapper, + struct dmabuf_interop *dmabuf_interop, + bool probing); + void (*interop_unmap)(struct ra_hwdec_mapper *mapper); +}; + +struct dmabuf_interop_priv { + int num_planes; + struct mp_image layout; + struct ra_tex *tex[4]; + + AVDRMFrameDescriptor desc; + bool surface_acquired; + + void *interop_mapper_priv; +}; + +typedef bool (*dmabuf_interop_init)(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop); + +bool dmabuf_interop_gl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop); +bool dmabuf_interop_pl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop); +bool dmabuf_interop_wl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop); diff --git a/video/out/hwdec/dmabuf_interop_gl.c b/video/out/hwdec/dmabuf_interop_gl.c new file mode 100644 index 0000000..e7fb103 --- /dev/null +++ b/video/out/hwdec/dmabuf_interop_gl.c @@ -0,0 +1,311 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#include "dmabuf_interop.h" + +#include +#include +#include "video/out/opengl/ra_gl.h" + +typedef void* GLeglImageOES; +typedef void *EGLImageKHR; + +// Any EGL_EXT_image_dma_buf_import definitions used in this source file. +#define EGL_LINUX_DMA_BUF_EXT 0x3270 +#define EGL_LINUX_DRM_FOURCC_EXT 0x3271 +#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272 +#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273 +#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274 +#define EGL_DMA_BUF_PLANE1_FD_EXT 0x3275 +#define EGL_DMA_BUF_PLANE1_OFFSET_EXT 0x3276 +#define EGL_DMA_BUF_PLANE1_PITCH_EXT 0x3277 +#define EGL_DMA_BUF_PLANE2_FD_EXT 0x3278 +#define EGL_DMA_BUF_PLANE2_OFFSET_EXT 0x3279 +#define EGL_DMA_BUF_PLANE2_PITCH_EXT 0x327A + + +// Any EGL_EXT_image_dma_buf_import definitions used in this source file. +#define EGL_DMA_BUF_PLANE3_FD_EXT 0x3440 +#define EGL_DMA_BUF_PLANE3_OFFSET_EXT 0x3441 +#define EGL_DMA_BUF_PLANE3_PITCH_EXT 0x3442 +#define EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT 0x3443 +#define EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT 0x3444 +#define EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT 0x3445 +#define EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT 0x3446 +#define EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT 0x3447 +#define EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT 0x3448 +#define EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT 0x3449 +#define EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT 0x344A + +struct vaapi_gl_mapper_priv { + GLuint gl_textures[4]; + EGLImageKHR images[4]; + + EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext, + EGLenum, EGLClientBuffer, + const EGLint *); + EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR); + void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES); +}; + +static bool vaapi_gl_mapper_init(struct ra_hwdec_mapper *mapper, + const struct ra_imgfmt_desc *desc) +{ + struct dmabuf_interop_priv *p_mapper = mapper->priv; + struct vaapi_gl_mapper_priv *p = talloc_ptrtype(NULL, p); + p_mapper->interop_mapper_priv = p; + + *p = (struct vaapi_gl_mapper_priv) { + // 
EGL_KHR_image_base + .CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"), + .DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"), + // GL_OES_EGL_image + .EGLImageTargetTexture2DOES = + (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"), + }; + + if (!p->CreateImageKHR || !p->DestroyImageKHR || + !p->EGLImageTargetTexture2DOES) + return false; + + GL *gl = ra_gl_get(mapper->ra); + gl->GenTextures(4, p->gl_textures); + for (int n = 0; n < desc->num_planes; n++) { + gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]); + gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + gl->BindTexture(GL_TEXTURE_2D, 0); + + struct ra_tex_params params = { + .dimensions = 2, + .w = mp_image_plane_w(&p_mapper->layout, n), + .h = mp_image_plane_h(&p_mapper->layout, n), + .d = 1, + .format = desc->planes[n], + .render_src = true, + .src_linear = true, + }; + + if (params.format->ctype != RA_CTYPE_UNORM) + return false; + + p_mapper->tex[n] = ra_create_wrapped_tex(mapper->ra, ¶ms, + p->gl_textures[n]); + if (!p_mapper->tex[n]) + return false; + } + + return true; +} + +static void vaapi_gl_mapper_uninit(const struct ra_hwdec_mapper *mapper) +{ + struct dmabuf_interop_priv *p_mapper = mapper->priv; + struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv; + + if (p) { + GL *gl = ra_gl_get(mapper->ra); + gl->DeleteTextures(4, p->gl_textures); + for (int n = 0; n < 4; n++) { + p->gl_textures[n] = 0; + ra_tex_free(mapper->ra, &p_mapper->tex[n]); + } + talloc_free(p); + p_mapper->interop_mapper_priv = NULL; + } +} + +#define ADD_ATTRIB(name, value) \ + do { \ + assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ + attribs[num_attribs++] = (name); \ + attribs[num_attribs++] = (value); \ + attribs[num_attribs] = EGL_NONE; \ 
+ } while(0) + +#define ADD_PLANE_ATTRIBS(plane) do { \ + uint64_t drm_format_modifier = p_mapper->desc.objects[p_mapper->desc.layers[i].planes[j].object_index].format_modifier; \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \ + p_mapper->desc.objects[p_mapper->desc.layers[i].planes[j].object_index].fd); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \ + p_mapper->desc.layers[i].planes[j].offset); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \ + p_mapper->desc.layers[i].planes[j].pitch); \ + if (dmabuf_interop->use_modifiers && drm_format_modifier != DRM_FORMAT_MOD_INVALID) { \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _MODIFIER_LO_EXT, drm_format_modifier & 0xfffffffful); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _MODIFIER_HI_EXT, drm_format_modifier >> 32); \ + } \ + } while (0) + +static bool vaapi_gl_map(struct ra_hwdec_mapper *mapper, + struct dmabuf_interop *dmabuf_interop, + bool probing) +{ + struct dmabuf_interop_priv *p_mapper = mapper->priv; + struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv; + + GL *gl = ra_gl_get(mapper->ra); + + for (int i = 0, n = 0; i < p_mapper->desc.nb_layers; i++) { + /* + * As we must map surfaces as one texture per plane, we can only support + * a subset of possible multi-plane layer formats. This is due to having + * to manually establish what DRM format each synthetic layer should + * have. 
+ */ + uint32_t format[AV_DRM_MAX_PLANES] = { + p_mapper->desc.layers[i].format, + }; + + if (p_mapper->desc.layers[i].nb_planes > 1) { + switch (p_mapper->desc.layers[i].format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV16: + format[0] = DRM_FORMAT_R8; + format[1] = DRM_FORMAT_GR88; + break; + case DRM_FORMAT_YUV420: + format[0] = DRM_FORMAT_R8; + format[1] = DRM_FORMAT_R8; + format[2] = DRM_FORMAT_R8; + break; + case DRM_FORMAT_P010: +#ifdef DRM_FORMAT_P030 /* Format added in a newer libdrm version than minimum */ + case DRM_FORMAT_P030: +#endif + format[0] = DRM_FORMAT_R16; + format[1] = DRM_FORMAT_GR1616; + break; + default: + mp_msg(mapper->log, probing ? MSGL_DEBUG : MSGL_ERR, + "Cannot map unknown multi-plane format: 0x%08X\n", + p_mapper->desc.layers[i].format); + return false; + } + } else { + /* + * As OpenGL only has one guaranteed rgba format (rgba8), drivers + * that support importing dmabuf formats with different channel + * orders do implicit swizzling to get to rgba. However, we look at + * the original imgfmt to decide channel order, and we then swizzle + * based on that. So, we can get into a situation where we swizzle + * twice and end up with a mess. + * + * The simplest way to avoid that is to lie to OpenGL and say that + * the surface we are importing is in the natural channel order, so + * that our swizzling does the right thing. + * + * DRM ABGR corresponds to OpenGL RGBA due to different naming + * conventions. + */ + switch (format[0]) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + format[0] = DRM_FORMAT_ABGR8888; + break; + case DRM_FORMAT_XRGB8888: + format[0] = DRM_FORMAT_XBGR8888; + break; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + // Logically, these two formats should be handled as above, + // but there appear to be additional problems that make the + // format change here insufficient or incorrect, so we're + // doing nothing for now. 
+ break; + } + } + + for (int j = 0; j < p_mapper->desc.layers[i].nb_planes; j++, n++) { + int attribs[48] = {EGL_NONE}; + int num_attribs = 0; + + ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, format[j]); + ADD_ATTRIB(EGL_WIDTH, p_mapper->tex[n]->params.w); + ADD_ATTRIB(EGL_HEIGHT, p_mapper->tex[n]->params.h); + ADD_PLANE_ATTRIBS(0); + + p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(), + EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs); + if (!p->images[n]) { + mp_msg(mapper->log, probing ? MSGL_DEBUG : MSGL_ERR, + "Failed to import surface in EGL: %u\n", eglGetError()); + return false; + } + + gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]); + p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]); + + mapper->tex[n] = p_mapper->tex[n]; + } + } + + gl->BindTexture(GL_TEXTURE_2D, 0); + return true; +} + +static void vaapi_gl_unmap(struct ra_hwdec_mapper *mapper) +{ + struct dmabuf_interop_priv *p_mapper = mapper->priv; + struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv; + + if (p) { + for (int n = 0; n < 4; n++) { + if (p->images[n]) + p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]); + p->images[n] = 0; + } + } +} + +bool dmabuf_interop_gl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop) +{ + if (!ra_is_gl(hw->ra_ctx->ra)) { + // This is not an OpenGL RA. 
+ return false; + } + + if (!eglGetCurrentContext()) + return false; + + const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS); + if (!exts) + return false; + + GL *gl = ra_gl_get(hw->ra_ctx->ra); + if (!gl_check_extension(exts, "EGL_EXT_image_dma_buf_import") || + !gl_check_extension(exts, "EGL_KHR_image_base") || + !gl_check_extension(gl->extensions, "GL_OES_EGL_image") || + !(gl->mpgl_caps & MPGL_CAP_TEX_RG)) + return false; + + dmabuf_interop->use_modifiers = + gl_check_extension(exts, "EGL_EXT_image_dma_buf_import_modifiers"); + + MP_VERBOSE(hw, "using EGL dmabuf interop\n"); + + dmabuf_interop->interop_init = vaapi_gl_mapper_init; + dmabuf_interop->interop_uninit = vaapi_gl_mapper_uninit; + dmabuf_interop->interop_map = vaapi_gl_map; + dmabuf_interop->interop_unmap = vaapi_gl_unmap; + + return true; +} diff --git a/video/out/hwdec/dmabuf_interop_pl.c b/video/out/hwdec/dmabuf_interop_pl.c new file mode 100644 index 0000000..0a8ec5b --- /dev/null +++ b/video/out/hwdec/dmabuf_interop_pl.c @@ -0,0 +1,138 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#include +#include + +#include "dmabuf_interop.h" +#include "video/out/placebo/ra_pl.h" +#include "video/out/placebo/utils.h" + +static bool vaapi_pl_map(struct ra_hwdec_mapper *mapper, + struct dmabuf_interop *dmabuf_interop, + bool probing) +{ + struct dmabuf_interop_priv *p = mapper->priv; + pl_gpu gpu = ra_pl_get(mapper->ra); + + struct ra_imgfmt_desc desc = {0}; + if (!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc)) + return false; + + // The calling code validates that the total number of exported planes + // equals the number we expected in p->num_planes. + int layer = 0; + int layer_plane = 0; + for (int n = 0; n < p->num_planes; n++) { + + const struct ra_format *format = desc.planes[n]; + int id = p->desc.layers[layer].planes[layer_plane].object_index; + int fd = p->desc.objects[id].fd; + uint32_t size = p->desc.objects[id].size; + uint32_t offset = p->desc.layers[layer].planes[layer_plane].offset; + uint32_t pitch = p->desc.layers[layer].planes[layer_plane].pitch; + + // AMD drivers do not return the size in the surface description, so we + // need to query it manually. 
+ if (size == 0) { + size = lseek(fd, 0, SEEK_END); + if (size == -1) { + MP_ERR(mapper, "Cannot obtain size of object with fd %d: %s\n", + fd, mp_strerror(errno)); + return false; + } + off_t err = lseek(fd, 0, SEEK_SET); + if (err == -1) { + MP_ERR(mapper, "Failed to reset offset for fd %d: %s\n", + fd, mp_strerror(errno)); + return false; + } + } + + struct pl_tex_params tex_params = { + .w = mp_image_plane_w(&p->layout, n), + .h = mp_image_plane_h(&p->layout, n), + .d = 0, + .format = format->priv, + .sampleable = true, + .import_handle = PL_HANDLE_DMA_BUF, + .shared_mem = (struct pl_shared_mem) { + .handle = { + .fd = fd, + }, + .size = size, + .offset = offset, + .drm_format_mod = p->desc.objects[id].format_modifier, + .stride_w = pitch, + }, + }; + + mppl_log_set_probing(gpu->log, probing); + pl_tex pltex = pl_tex_create(gpu, &tex_params); + mppl_log_set_probing(gpu->log, false); + if (!pltex) + return false; + + struct ra_tex *ratex = talloc_ptrtype(NULL, ratex); + int ret = mppl_wrap_tex(mapper->ra, pltex, ratex); + if (!ret) { + pl_tex_destroy(gpu, &pltex); + talloc_free(ratex); + return false; + } + mapper->tex[n] = ratex; + + MP_TRACE(mapper, "Object %d with fd %d imported as %p\n", + id, fd, ratex); + + layer_plane++; + if (layer_plane == p->desc.layers[layer].nb_planes) { + layer_plane = 0; + layer++; + } + } + return true; +} + +static void vaapi_pl_unmap(struct ra_hwdec_mapper *mapper) +{ + for (int n = 0; n < 4; n++) + ra_tex_free(mapper->ra, &mapper->tex[n]); +} + +bool dmabuf_interop_pl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop) +{ + pl_gpu gpu = ra_pl_get(hw->ra_ctx->ra); + if (!gpu) { + // This is not a libplacebo RA; + return false; + } + + if (!(gpu->import_caps.tex & PL_HANDLE_DMA_BUF)) { + MP_VERBOSE(hw, "libplacebo dmabuf interop requires support for " + "PL_HANDLE_DMA_BUF import.\n"); + return false; + } + + MP_VERBOSE(hw, "using libplacebo dmabuf interop\n"); + + dmabuf_interop->interop_map = vaapi_pl_map; + 
dmabuf_interop->interop_unmap = vaapi_pl_unmap; + + return true; +} diff --git a/video/out/hwdec/dmabuf_interop_wl.c b/video/out/hwdec/dmabuf_interop_wl.c new file mode 100644 index 0000000..606a0aa --- /dev/null +++ b/video/out/hwdec/dmabuf_interop_wl.c @@ -0,0 +1,83 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ +#include "video/out/wldmabuf/ra_wldmabuf.h" +#include "dmabuf_interop.h" + +static bool mapper_init(struct ra_hwdec_mapper *mapper, + const struct ra_imgfmt_desc *desc) +{ + return true; +} + +static void mapper_uninit(const struct ra_hwdec_mapper *mapper) +{ +} + +static bool map(struct ra_hwdec_mapper *mapper, + struct dmabuf_interop *dmabuf_interop, + bool probing) +{ + // 1. only validate format when composed layers is enabled (i.e. vaapi) + // 2. for drmprime, just return true for now, as this use case + // has not been tested. 
+ if (!dmabuf_interop->composed_layers) + return true; + + int layer_no = 0; + struct dmabuf_interop_priv *mapper_p = mapper->priv; + uint32_t drm_format = mapper_p->desc.layers[layer_no].format; + + if (mapper_p->desc.nb_layers != 1) { + MP_VERBOSE(mapper, "Mapped surface has separate layers - expected composed layers.\n"); + return false; + } else if (!ra_compatible_format(mapper->ra, drm_format, + mapper_p->desc.objects[0].format_modifier)) { + MP_VERBOSE(mapper, "Mapped surface with format %s; drm format '%s(%016lx)' " + "is not supported by compositor.\n", + mp_imgfmt_to_name(mapper->src->params.hw_subfmt), + mp_tag_str(drm_format), + mapper_p->desc.objects[0].format_modifier); + return false; + } + + MP_VERBOSE(mapper, "Supported Wayland display format %s: '%s(%016lx)'\n", + mp_imgfmt_to_name(mapper->src->params.hw_subfmt), + mp_tag_str(drm_format), mapper_p->desc.objects[0].format_modifier); + + return true; +} + +static void unmap(struct ra_hwdec_mapper *mapper) +{ +} + +bool dmabuf_interop_wl_init(const struct ra_hwdec *hw, + struct dmabuf_interop *dmabuf_interop) +{ + if (!ra_is_wldmabuf(hw->ra_ctx->ra)) + return false; + + if (strstr(hw->driver->name, "vaapi") != NULL) + dmabuf_interop->composed_layers = true; + + dmabuf_interop->interop_init = mapper_init; + dmabuf_interop->interop_uninit = mapper_uninit; + dmabuf_interop->interop_map = map; + dmabuf_interop->interop_unmap = unmap; + + return true; +} diff --git a/video/out/hwdec/hwdec_aimagereader.c b/video/out/hwdec/hwdec_aimagereader.c new file mode 100644 index 0000000..0dd5497 --- /dev/null +++ b/video/out/hwdec/hwdec_aimagereader.c @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2021 sfan5 + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "misc/jni.h" +#include "osdep/threads.h" +#include "osdep/timer.h" +#include "video/out/gpu/hwdec.h" +#include "video/out/opengl/ra_gl.h" + +typedef void *GLeglImageOES; +typedef void *EGLImageKHR; +#define EGL_NATIVE_BUFFER_ANDROID 0x3140 + +struct priv_owner { + struct mp_hwdec_ctx hwctx; + AImageReader *reader; + jobject surface; + void *lib_handle; + + media_status_t (*AImageReader_newWithUsage)( + int32_t, int32_t, int32_t, uint64_t, int32_t, AImageReader **); + media_status_t (*AImageReader_getWindow)( + AImageReader *, ANativeWindow **); + media_status_t (*AImageReader_setImageListener)( + AImageReader *, AImageReader_ImageListener *); + media_status_t (*AImageReader_acquireLatestImage)(AImageReader *, AImage **); + void (*AImageReader_delete)(AImageReader *); + media_status_t (*AImage_getHardwareBuffer)(const AImage *, AHardwareBuffer **); + void (*AImage_delete)(AImage *); + void (*AHardwareBuffer_describe)(const AHardwareBuffer *, AHardwareBuffer_Desc *); + jobject (*ANativeWindow_toSurface)(JNIEnv *, ANativeWindow *); +}; + +struct priv { + struct mp_log *log; + + GLuint gl_texture; + AImage *image; + EGLImageKHR egl_image; + + mp_mutex lock; + mp_cond cond; + bool image_available; + + EGLImageKHR (EGLAPIENTRY *CreateImageKHR)( + EGLDisplay, EGLContext, EGLenum, EGLClientBuffer, const EGLint *); + EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR); + EGLClientBuffer (EGLAPIENTRY *GetNativeClientBufferANDROID)( + const struct AHardwareBuffer *); + void (EGLAPIENTRY 
*EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES); +}; + +const static struct { const char *symbol; int offset; } lib_functions[] = { + { "AImageReader_newWithUsage", offsetof(struct priv_owner, AImageReader_newWithUsage) }, + { "AImageReader_getWindow", offsetof(struct priv_owner, AImageReader_getWindow) }, + { "AImageReader_setImageListener", offsetof(struct priv_owner, AImageReader_setImageListener) }, + { "AImageReader_acquireLatestImage", offsetof(struct priv_owner, AImageReader_acquireLatestImage) }, + { "AImageReader_delete", offsetof(struct priv_owner, AImageReader_delete) }, + { "AImage_getHardwareBuffer", offsetof(struct priv_owner, AImage_getHardwareBuffer) }, + { "AImage_delete", offsetof(struct priv_owner, AImage_delete) }, + { "AHardwareBuffer_describe", offsetof(struct priv_owner, AHardwareBuffer_describe) }, + { "ANativeWindow_toSurface", offsetof(struct priv_owner, ANativeWindow_toSurface) }, + { NULL, 0 }, +}; + + +static AVBufferRef *create_mediacodec_device_ref(jobject surface) +{ + AVBufferRef *device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_MEDIACODEC); + if (!device_ref) + return NULL; + + AVHWDeviceContext *ctx = (void *)device_ref->data; + AVMediaCodecDeviceContext *hwctx = ctx->hwctx; + hwctx->surface = surface; + + if (av_hwdevice_ctx_init(device_ref) < 0) + av_buffer_unref(&device_ref); + + return device_ref; +} + +static bool load_lib_functions(struct priv_owner *p, struct mp_log *log) +{ + p->lib_handle = dlopen("libmediandk.so", RTLD_NOW | RTLD_GLOBAL); + if (!p->lib_handle) + return false; + for (int i = 0; lib_functions[i].symbol; i++) { + const char *sym = lib_functions[i].symbol; + void *fun = dlsym(p->lib_handle, sym); + if (!fun) + fun = dlsym(RTLD_DEFAULT, sym); + if (!fun) { + mp_warn(log, "Could not resolve symbol %s\n", sym); + return false; + } + + *(void **) ((uint8_t*)p + lib_functions[i].offset) = fun; + } + return true; +} + +static int init(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + if 
(!ra_is_gl(hw->ra_ctx->ra)) + return -1; + if (!eglGetCurrentContext()) + return -1; + + const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS); + if (!gl_check_extension(exts, "EGL_ANDROID_image_native_buffer")) + return -1; + + if (!load_lib_functions(p, hw->log)) + return -1; + + static const char *es2_exts[] = {"GL_OES_EGL_image_external", 0}; + static const char *es3_exts[] = {"GL_OES_EGL_image_external_essl3", 0}; + GL *gl = ra_gl_get(hw->ra_ctx->ra); + if (gl_check_extension(gl->extensions, es3_exts[0])) + hw->glsl_extensions = es3_exts; + else + hw->glsl_extensions = es2_exts; + + // dummy dimensions, AImageReader only transports hardware buffers + media_status_t ret = p->AImageReader_newWithUsage(16, 16, + AIMAGE_FORMAT_PRIVATE, AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, + 5, &p->reader); + if (ret != AMEDIA_OK) { + MP_ERR(hw, "newWithUsage failed: %d\n", ret); + return -1; + } + assert(p->reader); + + ANativeWindow *window; + ret = p->AImageReader_getWindow(p->reader, &window); + if (ret != AMEDIA_OK) { + MP_ERR(hw, "getWindow failed: %d\n", ret); + return -1; + } + assert(window); + + JNIEnv *env = MP_JNI_GET_ENV(hw); + assert(env); + jobject surface = p->ANativeWindow_toSurface(env, window); + p->surface = (*env)->NewGlobalRef(env, surface); + (*env)->DeleteLocalRef(env, surface); + + p->hwctx = (struct mp_hwdec_ctx) { + .driver_name = hw->driver->name, + .av_device_ref = create_mediacodec_device_ref(p->surface), + .hw_imgfmt = IMGFMT_MEDIACODEC, + }; + + if (!p->hwctx.av_device_ref) { + MP_VERBOSE(hw, "Failed to create hwdevice_ctx\n"); + return -1; + } + + hwdec_devices_add(hw->devs, &p->hwctx); + + return 0; +} + +static void uninit(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + JNIEnv *env = MP_JNI_GET_ENV(hw); + assert(env); + + if (p->surface) { + (*env)->DeleteGlobalRef(env, p->surface); + p->surface = NULL; + } + + if (p->reader) { + p->AImageReader_delete(p->reader); + p->reader = NULL; + } + + 
hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); + + if (p->lib_handle) { + dlclose(p->lib_handle); + p->lib_handle = NULL; + } +} + +static void image_callback(void *context, AImageReader *reader) +{ + struct priv *p = context; + + mp_mutex_lock(&p->lock); + p->image_available = true; + mp_cond_signal(&p->cond); + mp_mutex_unlock(&p->lock); +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + struct priv_owner *o = mapper->owner->priv; + GL *gl = ra_gl_get(mapper->ra); + + p->log = mapper->log; + mp_mutex_init(&p->lock); + mp_cond_init(&p->cond); + + p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); + p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"); + p->GetNativeClientBufferANDROID = + (void *)eglGetProcAddress("eglGetNativeClientBufferANDROID"); + p->EGLImageTargetTexture2DOES = + (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"); + + if (!p->CreateImageKHR || !p->DestroyImageKHR || + !p->GetNativeClientBufferANDROID || !p->EGLImageTargetTexture2DOES) + return -1; + + AImageReader_ImageListener listener = { + .context = p, + .onImageAvailable = image_callback, + }; + o->AImageReader_setImageListener(o->reader, &listener); + + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = IMGFMT_RGB0; + mapper->dst_params.hw_subfmt = 0; + + // texture creation + gl->GenTextures(1, &p->gl_texture); + gl->BindTexture(GL_TEXTURE_EXTERNAL_OES, p->gl_texture); + gl->TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + gl->TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + gl->TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + gl->TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + gl->BindTexture(GL_TEXTURE_EXTERNAL_OES, 0); + + struct ra_tex_params params = { + .dimensions = 2, + .w = mapper->src_params.w, + .h = 
mapper->src_params.h, + .d = 1, + .format = ra_find_unorm_format(mapper->ra, 1, 4), + .render_src = true, + .src_linear = true, + .external_oes = true, + }; + + if (params.format->ctype != RA_CTYPE_UNORM) + return -1; + + mapper->tex[0] = ra_create_wrapped_tex(mapper->ra, ¶ms, p->gl_texture); + if (!mapper->tex[0]) + return -1; + + return 0; +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + struct priv_owner *o = mapper->owner->priv; + GL *gl = ra_gl_get(mapper->ra); + + o->AImageReader_setImageListener(o->reader, NULL); + + gl->DeleteTextures(1, &p->gl_texture); + p->gl_texture = 0; + + ra_tex_free(mapper->ra, &mapper->tex[0]); + + mp_mutex_destroy(&p->lock); + mp_cond_destroy(&p->cond); +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + struct priv_owner *o = mapper->owner->priv; + + if (p->egl_image) { + p->DestroyImageKHR(eglGetCurrentDisplay(), p->egl_image); + p->egl_image = 0; + } + + if (p->image) { + o->AImage_delete(p->image); + p->image = NULL; + } +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + struct priv_owner *o = mapper->owner->priv; + GL *gl = ra_gl_get(mapper->ra); + + { + if (mapper->src->imgfmt != IMGFMT_MEDIACODEC) + return -1; + AVMediaCodecBuffer *buffer = (AVMediaCodecBuffer *)mapper->src->planes[3]; + av_mediacodec_release_buffer(buffer, 1); + } + + bool image_available = false; + mp_mutex_lock(&p->lock); + if (!p->image_available) { + mp_cond_timedwait(&p->cond, &p->lock, MP_TIME_MS_TO_NS(100)); + if (!p->image_available) + MP_WARN(mapper, "Waiting for frame timed out!\n"); + } + image_available = p->image_available; + p->image_available = false; + mp_mutex_unlock(&p->lock); + + media_status_t ret = o->AImageReader_acquireLatestImage(o->reader, &p->image); + if (ret != AMEDIA_OK) { + MP_ERR(mapper, "acquireLatestImage failed: %d\n", ret); + // If we merely timed out waiting return success 
anyway to avoid + // flashing frames of render errors. + return image_available ? -1 : 0; + } + assert(p->image); + + AHardwareBuffer *hwbuf = NULL; + ret = o->AImage_getHardwareBuffer(p->image, &hwbuf); + if (ret != AMEDIA_OK) { + MP_ERR(mapper, "getHardwareBuffer failed: %d\n", ret); + return -1; + } + assert(hwbuf); + + // Update texture size since it may differ + AHardwareBuffer_Desc d; + o->AHardwareBuffer_describe(hwbuf, &d); + if (mapper->tex[0]->params.w != d.width || mapper->tex[0]->params.h != d.height) { + MP_VERBOSE(p, "Texture dimensions changed to %dx%d\n", d.width, d.height); + mapper->tex[0]->params.w = d.width; + mapper->tex[0]->params.h = d.height; + } + + EGLClientBuffer buf = p->GetNativeClientBufferANDROID(hwbuf); + if (!buf) + return -1; + + const int attribs[] = {EGL_NONE}; + p->egl_image = p->CreateImageKHR(eglGetCurrentDisplay(), + EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID, buf, attribs); + if (!p->egl_image) + return -1; + + gl->BindTexture(GL_TEXTURE_EXTERNAL_OES, p->gl_texture); + p->EGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, p->egl_image); + gl->BindTexture(GL_TEXTURE_EXTERNAL_OES, 0); + + return 0; +} + + +const struct ra_hwdec_driver ra_hwdec_aimagereader = { + .name = "aimagereader", + .priv_size = sizeof(struct priv_owner), + .imgfmts = {IMGFMT_MEDIACODEC, 0}, + .init = init, + .uninit = uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct priv), + .init = mapper_init, + .uninit = mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; diff --git a/video/out/hwdec/hwdec_cuda.c b/video/out/hwdec/hwdec_cuda.c new file mode 100644 index 0000000..68ad60d --- /dev/null +++ b/video/out/hwdec/hwdec_cuda.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2016 Philip Langdale + * + * This file is part of mpv. 
+ * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +/* + * This hwdec implements an optimized output path using CUDA->OpenGL + * or CUDA->Vulkan interop for frame data that is stored in CUDA + * device memory. Although it is not explicit in the code here, the + * only practical way to get data in this form is from the + * nvdec/cuvid decoder. + */ + +#include "config.h" +#include "hwdec_cuda.h" + +#include +#include + +int check_cu(const struct ra_hwdec *hw, CUresult err, const char *func) +{ + const char *err_name; + const char *err_string; + + struct cuda_hw_priv *p = hw->priv; + int level = hw->probing ? MSGL_V : MSGL_ERR; + + MP_TRACE(hw, "Calling %s\n", func); + + if (err == CUDA_SUCCESS) + return 0; + + p->cu->cuGetErrorName(err, &err_name); + p->cu->cuGetErrorString(err, &err_string); + + MP_MSG(hw, level, "%s failed", func); + if (err_name && err_string) + MP_MSG(hw, level, " -> %s: %s", err_name, err_string); + MP_MSG(hw, level, "\n"); + + return -1; +} + +#define CHECK_CU(x) check_cu(hw, (x), #x) + +const static cuda_interop_init interop_inits[] = { +#if HAVE_GL + cuda_gl_init, +#endif +#if HAVE_VULKAN + cuda_vk_init, +#endif + NULL +}; + +static int cuda_init(struct ra_hwdec *hw) +{ + AVBufferRef *hw_device_ctx = NULL; + CUcontext dummy; + int ret = 0; + struct cuda_hw_priv *p = hw->priv; + CudaFunctions *cu; + int level = hw->probing ? 
MSGL_V : MSGL_ERR; + + ret = cuda_load_functions(&p->cu, NULL); + if (ret != 0) { + MP_MSG(hw, level, "Failed to load CUDA symbols\n"); + return -1; + } + cu = p->cu; + + ret = CHECK_CU(cu->cuInit(0)); + if (ret < 0) + return -1; + + // Initialise CUDA context from backend. + for (int i = 0; interop_inits[i]; i++) { + if (interop_inits[i](hw)) { + break; + } + } + + if (!p->ext_init || !p->ext_uninit) { + MP_MSG(hw, level, + "CUDA hwdec only works with OpenGL or Vulkan backends.\n"); + return -1; + } + + hw_device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA); + if (!hw_device_ctx) + goto error; + + AVHWDeviceContext *device_ctx = (void *)hw_device_ctx->data; + + AVCUDADeviceContext *device_hwctx = device_ctx->hwctx; + device_hwctx->cuda_ctx = p->decode_ctx; + + ret = av_hwdevice_ctx_init(hw_device_ctx); + if (ret < 0) { + MP_MSG(hw, level, "av_hwdevice_ctx_init failed\n"); + goto error; + } + + ret = CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + if (ret < 0) + goto error; + + p->hwctx = (struct mp_hwdec_ctx) { + .driver_name = hw->driver->name, + .av_device_ref = hw_device_ctx, + .hw_imgfmt = IMGFMT_CUDA, + }; + hwdec_devices_add(hw->devs, &p->hwctx); + return 0; + + error: + av_buffer_unref(&hw_device_ctx); + CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + + return -1; +} + +static void cuda_uninit(struct ra_hwdec *hw) +{ + struct cuda_hw_priv *p = hw->priv; + CudaFunctions *cu = p->cu; + + hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); + + if (p->decode_ctx && p->decode_ctx != p->display_ctx) + CHECK_CU(cu->cuCtxDestroy(p->decode_ctx)); + + if (p->display_ctx) + CHECK_CU(cu->cuCtxDestroy(p->display_ctx)); + + cuda_free_functions(&p->cu); +} + +#undef CHECK_CU +#define CHECK_CU(x) check_cu((mapper)->owner, (x), #x) + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CUcontext dummy; + CudaFunctions *cu = p_owner->cu; + int 
ret = 0, eret = 0; + + p->display_ctx = p_owner->display_ctx; + + int imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = imgfmt; + mapper->dst_params.hw_subfmt = 0; + + mp_image_set_params(&p->layout, &mapper->dst_params); + + struct ra_imgfmt_desc desc; + if (!ra_get_imgfmt_desc(mapper->ra, imgfmt, &desc)) { + MP_ERR(mapper, "Unsupported format: %s\n", mp_imgfmt_to_name(imgfmt)); + return -1; + } + + ret = CHECK_CU(cu->cuCtxPushCurrent(p->display_ctx)); + if (ret < 0) + return ret; + + for (int n = 0; n < desc.num_planes; n++) { + if (!p_owner->ext_init(mapper, desc.planes[n], n)) + goto error; + } + + error: + eret = CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + if (eret < 0) + return eret; + + return ret; +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct cuda_mapper_priv *p = mapper->priv; + struct cuda_hw_priv *p_owner = mapper->owner->priv; + CudaFunctions *cu = p_owner->cu; + CUcontext dummy; + + // Don't bail if any CUDA calls fail. This is all best effort. 
+ CHECK_CU(cu->cuCtxPushCurrent(p->display_ctx)); + for (int n = 0; n < 4; n++) { + p_owner->ext_uninit(mapper, n); + ra_tex_free(mapper->ra, &mapper->tex[n]); + } + CHECK_CU(cu->cuCtxPopCurrent(&dummy)); +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct cuda_mapper_priv *p = mapper->priv; + struct cuda_hw_priv *p_owner = mapper->owner->priv; + CudaFunctions *cu = p_owner->cu; + CUcontext dummy; + int ret = 0, eret = 0; + + ret = CHECK_CU(cu->cuCtxPushCurrent(p->display_ctx)); + if (ret < 0) + return ret; + + for (int n = 0; n < p->layout.num_planes; n++) { + if (p_owner->ext_wait) { + if (!p_owner->ext_wait(mapper, n)) + goto error; + } + + CUDA_MEMCPY2D cpy = { + .srcMemoryType = CU_MEMORYTYPE_DEVICE, + .srcDevice = (CUdeviceptr)mapper->src->planes[n], + .srcPitch = mapper->src->stride[n], + .srcY = 0, + .dstMemoryType = CU_MEMORYTYPE_ARRAY, + .dstArray = p->cu_array[n], + .WidthInBytes = mp_image_plane_w(&p->layout, n) * + mapper->tex[n]->params.format->pixel_size, + .Height = mp_image_plane_h(&p->layout, n), + }; + + ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, 0)); + if (ret < 0) + goto error; + + if (p_owner->ext_signal) { + if (!p_owner->ext_signal(mapper, n)) + goto error; + } + } + if (p_owner->do_full_sync) + CHECK_CU(cu->cuStreamSynchronize(0)); + + // fall through + error: + + // Regardless of success or failure, we no longer need the source image, + // because this hwdec makes an explicit memcpy into the mapper textures + mp_image_unrefp(&mapper->src); + + eret = CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + if (eret < 0) + return eret; + + return ret; +} + +const struct ra_hwdec_driver ra_hwdec_cuda = { + .name = "cuda", + .imgfmts = {IMGFMT_CUDA, 0}, + .priv_size = sizeof(struct cuda_hw_priv), + .init = cuda_init, + .uninit = cuda_uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct cuda_mapper_priv), + .init = mapper_init, + .uninit = 
mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; diff --git a/video/out/hwdec/hwdec_cuda.h b/video/out/hwdec/hwdec_cuda.h new file mode 100644 index 0000000..9c55053 --- /dev/null +++ b/video/out/hwdec/hwdec_cuda.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 Philip Langdale + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#pragma once + +#include + +#include "video/out/gpu/hwdec.h" + +struct cuda_hw_priv { + struct mp_hwdec_ctx hwctx; + CudaFunctions *cu; + CUcontext display_ctx; + CUcontext decode_ctx; + + // Do we need to do a full CPU sync after copying + bool do_full_sync; + + bool (*ext_init)(struct ra_hwdec_mapper *mapper, + const struct ra_format *format, int n); + void (*ext_uninit)(const struct ra_hwdec_mapper *mapper, int n); + + // These are only necessary if the gpu api requires synchronisation + bool (*ext_wait)(const struct ra_hwdec_mapper *mapper, int n); + bool (*ext_signal)(const struct ra_hwdec_mapper *mapper, int n); +}; + +struct cuda_mapper_priv { + struct mp_image layout; + CUarray cu_array[4]; + + CUcontext display_ctx; + + void *ext[4]; +}; + +typedef bool (*cuda_interop_init)(const struct ra_hwdec *hw); + +bool cuda_gl_init(const struct ra_hwdec *hw); + +bool cuda_vk_init(const struct ra_hwdec *hw); + +int check_cu(const struct ra_hwdec *hw, CUresult err, const char *func); diff --git 
a/video/out/hwdec/hwdec_cuda_gl.c b/video/out/hwdec/hwdec_cuda_gl.c new file mode 100644 index 0000000..f20540e --- /dev/null +++ b/video/out/hwdec/hwdec_cuda_gl.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2019 Philip Langdale + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#include "hwdec_cuda.h" +#include "options/m_config.h" +#include "options/options.h" +#include "video/out/opengl/formats.h" +#include "video/out/opengl/ra_gl.h" + +#include +#include +#include + +#define CHECK_CU(x) check_cu((mapper)->owner, (x), #x) + +struct ext_gl { + CUgraphicsResource cu_res; +}; + +static bool cuda_ext_gl_init(struct ra_hwdec_mapper *mapper, + const struct ra_format *format, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + int ret = 0; + CUcontext dummy; + + struct ext_gl *egl = talloc_ptrtype(NULL, egl); + p->ext[n] = egl; + + struct ra_tex_params params = { + .dimensions = 2, + .w = mp_image_plane_w(&p->layout, n), + .h = mp_image_plane_h(&p->layout, n), + .d = 1, + .format = format, + .render_src = true, + .src_linear = format->linear_filter, + }; + + mapper->tex[n] = ra_tex_create(mapper->ra, ¶ms); + if (!mapper->tex[n]) { + goto error; + } + + GLuint texture; + GLenum target; + ra_gl_get_raw_tex(mapper->ra, mapper->tex[n], &texture, &target); + + ret = 
CHECK_CU(cu->cuGraphicsGLRegisterImage(&egl->cu_res, texture, target, + CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); + if (ret < 0) + goto error; + + ret = CHECK_CU(cu->cuGraphicsMapResources(1, &egl->cu_res, 0)); + if (ret < 0) + goto error; + + ret = CHECK_CU(cu->cuGraphicsSubResourceGetMappedArray(&p->cu_array[n], egl->cu_res, + 0, 0)); + if (ret < 0) + goto error; + + ret = CHECK_CU(cu->cuGraphicsUnmapResources(1, &egl->cu_res, 0)); + if (ret < 0) + goto error; + + return true; + +error: + CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + return false; +} + +static void cuda_ext_gl_uninit(const struct ra_hwdec_mapper *mapper, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + + struct ext_gl *egl = p->ext[n]; + if (egl && egl->cu_res) { + CHECK_CU(cu->cuGraphicsUnregisterResource(egl->cu_res)); + egl->cu_res = 0; + } + talloc_free(egl); +} + +#undef CHECK_CU +#define CHECK_CU(x) check_cu(hw, (x), #x) + +bool cuda_gl_init(const struct ra_hwdec *hw) { + int ret = 0; + struct cuda_hw_priv *p = hw->priv; + CudaFunctions *cu = p->cu; + + if (ra_is_gl(hw->ra_ctx->ra)) { + GL *gl = ra_gl_get(hw->ra_ctx->ra); + if (gl->version < 210 && gl->es < 300) { + MP_VERBOSE(hw, "need OpenGL >= 2.1 or OpenGL-ES >= 3.0\n"); + return false; + } + } else { + // This is not an OpenGL RA. 
+ return false; + } + + CUdevice display_dev; + unsigned int device_count; + ret = CHECK_CU(cu->cuGLGetDevices(&device_count, &display_dev, 1, + CU_GL_DEVICE_LIST_ALL)); + if (ret < 0) + return false; + + ret = CHECK_CU(cu->cuCtxCreate(&p->display_ctx, CU_CTX_SCHED_BLOCKING_SYNC, + display_dev)); + if (ret < 0) + return false; + + p->decode_ctx = p->display_ctx; + + struct cuda_opts *opts = mp_get_config_group(NULL, hw->global, &cuda_conf); + int decode_dev_idx = opts->cuda_device; + talloc_free(opts); + + if (decode_dev_idx > -1) { + CUcontext dummy; + CUdevice decode_dev; + ret = CHECK_CU(cu->cuDeviceGet(&decode_dev, decode_dev_idx)); + if (ret < 0) { + CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + return false; + } + + if (decode_dev != display_dev) { + MP_INFO(hw, "Using separate decoder and display devices\n"); + + // Pop the display context. We won't use it again during init() + ret = CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + if (ret < 0) + return false; + + ret = CHECK_CU(cu->cuCtxCreate(&p->decode_ctx, CU_CTX_SCHED_BLOCKING_SYNC, + decode_dev)); + if (ret < 0) + return false; + } + } + + // We don't have a way to do a GPU sync after copying + p->do_full_sync = true; + + p->ext_init = cuda_ext_gl_init; + p->ext_uninit = cuda_ext_gl_uninit; + + return true; +} diff --git a/video/out/hwdec/hwdec_cuda_vk.c b/video/out/hwdec/hwdec_cuda_vk.c new file mode 100644 index 0000000..b9f8caa --- /dev/null +++ b/video/out/hwdec/hwdec_cuda_vk.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2019 Philip Langdale + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#include "config.h" +#include "hwdec_cuda.h" +#include "video/out/placebo/ra_pl.h" + +#include +#include +#include +#include + +#if HAVE_WIN32_DESKTOP +#include +#define HANDLE_TYPE PL_HANDLE_WIN32 +#else +#define HANDLE_TYPE PL_HANDLE_FD +#endif + +#define CHECK_CU(x) check_cu((mapper)->owner, (x), #x) + +struct ext_vk { + CUexternalMemory mem; + CUmipmappedArray mma; + + pl_tex pltex; + pl_vulkan_sem vk_sem; + union pl_handle sem_handle; + CUexternalSemaphore cuda_sem; +}; + +static bool cuda_ext_vk_init(struct ra_hwdec_mapper *mapper, + const struct ra_format *format, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + int mem_fd = -1; + int ret = 0; + + struct ext_vk *evk = talloc_ptrtype(NULL, evk); + p->ext[n] = evk; + + pl_gpu gpu = ra_pl_get(mapper->ra); + + struct pl_tex_params tex_params = { + .w = mp_image_plane_w(&p->layout, n), + .h = mp_image_plane_h(&p->layout, n), + .d = 0, + .format = ra_pl_fmt_get(format), + .sampleable = true, + .export_handle = HANDLE_TYPE, + }; + + evk->pltex = pl_tex_create(gpu, &tex_params); + if (!evk->pltex) { + goto error; + } + + struct ra_tex *ratex = talloc_ptrtype(NULL, ratex); + ret = mppl_wrap_tex(mapper->ra, evk->pltex, ratex); + if (!ret) { + pl_tex_destroy(gpu, &evk->pltex); + talloc_free(ratex); + goto error; + } + mapper->tex[n] = ratex; + +#if !HAVE_WIN32_DESKTOP + mem_fd = dup(evk->pltex->shared_mem.handle.fd); + if (mem_fd < 0) + goto error; +#endif + + CUDA_EXTERNAL_MEMORY_HANDLE_DESC ext_desc = { +#if HAVE_WIN32_DESKTOP + .type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, + .handle.win32.handle = evk->pltex->shared_mem.handle.handle, +#else + .type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, + .handle.fd = mem_fd, +#endif + .size 
= evk->pltex->shared_mem.size, + .flags = 0, + }; + ret = CHECK_CU(cu->cuImportExternalMemory(&evk->mem, &ext_desc)); + if (ret < 0) + goto error; + // CUDA takes ownership of imported memory + mem_fd = -1; + + CUarray_format cufmt; + switch (format->pixel_size / format->num_components) { + case 1: + cufmt = CU_AD_FORMAT_UNSIGNED_INT8; + break; + case 2: + cufmt = CU_AD_FORMAT_UNSIGNED_INT16; + break; + default: + ret = -1; + goto error; + } + + CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC tex_desc = { + .offset = evk->pltex->shared_mem.offset, + .arrayDesc = { + .Width = mp_image_plane_w(&p->layout, n), + .Height = mp_image_plane_h(&p->layout, n), + .Depth = 0, + .Format = cufmt, + .NumChannels = format->num_components, + .Flags = 0, + }, + .numLevels = 1, + }; + + ret = CHECK_CU(cu->cuExternalMemoryGetMappedMipmappedArray(&evk->mma, evk->mem, &tex_desc)); + if (ret < 0) + goto error; + + ret = CHECK_CU(cu->cuMipmappedArrayGetLevel(&p->cu_array[n], evk->mma, 0)); + if (ret < 0) + goto error; + + evk->vk_sem.sem = pl_vulkan_sem_create(gpu, pl_vulkan_sem_params( + .type = VK_SEMAPHORE_TYPE_TIMELINE, + .export_handle = HANDLE_TYPE, + .out_handle = &(evk->sem_handle), + )); + if (evk->vk_sem.sem == VK_NULL_HANDLE) { + ret = -1; + goto error; + } + // The returned FD or Handle is owned by the caller (us). + + CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC w_desc = { +#if HAVE_WIN32_DESKTOP + .type = CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, + .handle.win32.handle = evk->sem_handle.handle, +#else + .type = CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + .handle.fd = evk->sem_handle.fd, +#endif + }; + ret = CHECK_CU(cu->cuImportExternalSemaphore(&evk->cuda_sem, &w_desc)); + if (ret < 0) + goto error; + // CUDA takes ownership of an imported FD *but not* an imported Handle. 
+ evk->sem_handle.fd = -1; + + return true; + +error: + MP_ERR(mapper, "cuda_ext_vk_init failed\n"); + if (mem_fd > -1) + close(mem_fd); +#if HAVE_WIN32_DESKTOP + if (evk->sem_handle.handle != NULL) + CloseHandle(evk->sem_handle.handle); +#else + if (evk->sem_handle.fd > -1) + close(evk->sem_handle.fd); +#endif + return false; +} + +static void cuda_ext_vk_uninit(const struct ra_hwdec_mapper *mapper, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + + struct ext_vk *evk = p->ext[n]; + if (evk) { + if (evk->mma) { + CHECK_CU(cu->cuMipmappedArrayDestroy(evk->mma)); + evk->mma = 0; + } + if (evk->mem) { + CHECK_CU(cu->cuDestroyExternalMemory(evk->mem)); + evk->mem = 0; + } + if (evk->cuda_sem) { + CHECK_CU(cu->cuDestroyExternalSemaphore(evk->cuda_sem)); + evk->cuda_sem = 0; + } + pl_vulkan_sem_destroy(ra_pl_get(mapper->ra), &evk->vk_sem.sem); +#if HAVE_WIN32_DESKTOP + CloseHandle(evk->sem_handle.handle); +#endif + } + talloc_free(evk); +} + +static bool cuda_ext_vk_wait(const struct ra_hwdec_mapper *mapper, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + int ret; + struct ext_vk *evk = p->ext[n]; + + evk->vk_sem.value += 1; + ret = pl_vulkan_hold_ex(ra_pl_get(mapper->ra), pl_vulkan_hold_params( + .tex = evk->pltex, + .layout = VK_IMAGE_LAYOUT_GENERAL, + .qf = VK_QUEUE_FAMILY_EXTERNAL, + .semaphore = evk->vk_sem, + )); + if (!ret) + return false; + + CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS wp = { + .params = { + .fence = { + .value = evk->vk_sem.value + } + } + }; + ret = CHECK_CU(cu->cuWaitExternalSemaphoresAsync(&evk->cuda_sem, + &wp, 1, 0)); + return ret == 0; +} + +static bool cuda_ext_vk_signal(const struct ra_hwdec_mapper *mapper, int n) +{ + struct cuda_hw_priv *p_owner = mapper->owner->priv; + struct cuda_mapper_priv *p = mapper->priv; + CudaFunctions *cu = p_owner->cu; + 
int ret; + struct ext_vk *evk = p->ext[n]; + + evk->vk_sem.value += 1; + CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS sp = { + .params = { + .fence = { + .value = evk->vk_sem.value + } + } + }; + ret = CHECK_CU(cu->cuSignalExternalSemaphoresAsync(&evk->cuda_sem, + &sp, 1, 0)); + if (ret != 0) + return false; + + pl_vulkan_release_ex(ra_pl_get(mapper->ra), pl_vulkan_release_params( + .tex = evk->pltex, + .layout = VK_IMAGE_LAYOUT_GENERAL, + .qf = VK_QUEUE_FAMILY_EXTERNAL, + .semaphore = evk->vk_sem, + )); + return ret == 0; +} + +#undef CHECK_CU +#define CHECK_CU(x) check_cu(hw, (x), #x) + +bool cuda_vk_init(const struct ra_hwdec *hw) { + int ret = 0; + int level = hw->probing ? MSGL_V : MSGL_ERR; + struct cuda_hw_priv *p = hw->priv; + CudaFunctions *cu = p->cu; + + pl_gpu gpu = ra_pl_get(hw->ra_ctx->ra); + if (gpu != NULL) { + if (!(gpu->export_caps.tex & HANDLE_TYPE)) { + MP_VERBOSE(hw, "CUDA hwdec with Vulkan requires exportable texture memory of type 0x%X.\n", + HANDLE_TYPE); + return false; + } else if (!(gpu->export_caps.sync & HANDLE_TYPE)) { + MP_VERBOSE(hw, "CUDA hwdec with Vulkan requires exportable semaphores of type 0x%X.\n", + HANDLE_TYPE); + return false; + } + } else { + // This is not a Vulkan RA. 
+ return false; + } + + if (!cu->cuImportExternalMemory) { + MP_MSG(hw, level, "CUDA hwdec with Vulkan requires driver version 410.48 or newer.\n"); + return false; + } + + int device_count; + ret = CHECK_CU(cu->cuDeviceGetCount(&device_count)); + if (ret < 0) + return false; + + CUdevice display_dev = -1; + for (int i = 0; i < device_count; i++) { + CUdevice dev; + ret = CHECK_CU(cu->cuDeviceGet(&dev, i)); + if (ret < 0) + continue; + + CUuuid uuid; + ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev)); + if (ret < 0) + continue; + + if (memcmp(gpu->uuid, uuid.bytes, sizeof (gpu->uuid)) == 0) { + display_dev = dev; + break; + } + } + + if (display_dev == -1) { + MP_MSG(hw, level, "Could not match Vulkan display device in CUDA.\n"); + return false; + } + + ret = CHECK_CU(cu->cuCtxCreate(&p->display_ctx, CU_CTX_SCHED_BLOCKING_SYNC, + display_dev)); + if (ret < 0) + return false; + + p->decode_ctx = p->display_ctx; + + p->ext_init = cuda_ext_vk_init; + p->ext_uninit = cuda_ext_vk_uninit; + p->ext_wait = cuda_ext_vk_wait; + p->ext_signal = cuda_ext_vk_signal; + + return true; +} + diff --git a/video/out/hwdec/hwdec_drmprime.c b/video/out/hwdec/hwdec_drmprime.c new file mode 100644 index 0000000..f7c6250 --- /dev/null +++ b/video/out/hwdec/hwdec_drmprime.c @@ -0,0 +1,294 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "config.h" + +#include "libmpv/render_gl.h" +#include "options/m_config.h" +#include "video/fmt-conversion.h" +#include "video/out/drm_common.h" +#include "video/out/gpu/hwdec.h" +#include "video/out/hwdec/dmabuf_interop.h" + +extern const struct m_sub_options drm_conf; + +struct priv_owner { + struct mp_hwdec_ctx hwctx; + int *formats; + + struct dmabuf_interop dmabuf_interop; +}; + +static void uninit(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + if (p->hwctx.driver_name) + hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); +} + +const static dmabuf_interop_init interop_inits[] = { +#if HAVE_DMABUF_INTEROP_GL + dmabuf_interop_gl_init, +#endif +#if HAVE_VAAPI + dmabuf_interop_pl_init, +#endif +#if HAVE_DMABUF_WAYLAND + dmabuf_interop_wl_init, +#endif + NULL +}; + +static int init(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + for (int i = 0; interop_inits[i]; i++) { + if (interop_inits[i](hw, &p->dmabuf_interop)) { + break; + } + } + + if (!p->dmabuf_interop.interop_map || !p->dmabuf_interop.interop_unmap) { + MP_VERBOSE(hw, "drmprime hwdec requires at least one dmabuf interop backend.\n"); + return -1; + } + + /* + * The drm_params resource is not provided when using X11 or Wayland, but + * there are extensions that supposedly provide this information from the + * drivers. Not properly documented. Of course. + */ + mpv_opengl_drm_params_v2 *params = ra_get_native_resource(hw->ra_ctx->ra, + "drm_params_v2"); + + /* + * Respect drm_device option, so there is a way to control this when not + * using a DRM gpu context. If drm_params_v2 are present, they will already + * respect this option. 
+ */ + void *tmp = talloc_new(NULL); + struct drm_opts *drm_opts = mp_get_config_group(tmp, hw->global, &drm_conf); + const char *opt_path = drm_opts->device_path; + + const char *device_path = params && params->render_fd > -1 ? + drmGetRenderDeviceNameFromFd(params->render_fd) : + opt_path ? opt_path : "/dev/dri/renderD128"; + MP_VERBOSE(hw, "Using DRM device: %s\n", device_path); + + int ret = av_hwdevice_ctx_create(&p->hwctx.av_device_ref, + AV_HWDEVICE_TYPE_DRM, + device_path, NULL, 0); + talloc_free(tmp); + if (ret != 0) { + MP_VERBOSE(hw, "Failed to create hwdevice_ctx: %s\n", av_err2str(ret)); + return -1; + } + + /* + * At the moment, there is no way to discover compatible formats + * from the hwdevice_ctx, and in fact the ffmpeg hwaccels hard-code + * formats too, so we're not missing out on anything. + */ + int num_formats = 0; + MP_TARRAY_APPEND(p, p->formats, num_formats, IMGFMT_NV12); + MP_TARRAY_APPEND(p, p->formats, num_formats, IMGFMT_420P); + MP_TARRAY_APPEND(p, p->formats, num_formats, pixfmt2imgfmt(AV_PIX_FMT_NV16)); + MP_TARRAY_APPEND(p, p->formats, num_formats, 0); // terminate it + + p->hwctx.hw_imgfmt = IMGFMT_DRMPRIME; + p->hwctx.supported_formats = p->formats; + p->hwctx.driver_name = hw->driver->name; + hwdec_devices_add(hw->devs, &p->hwctx); + + return 0; +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + + p_owner->dmabuf_interop.interop_unmap(mapper); + + if (p->surface_acquired) { + for (int n = 0; n < p->desc.nb_objects; n++) { + if (p->desc.objects[n].fd > -1) + close(p->desc.objects[n].fd); + } + p->surface_acquired = false; + } +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + if (p_owner->dmabuf_interop.interop_uninit) { + p_owner->dmabuf_interop.interop_uninit(mapper); + } +} + +static bool check_fmt(struct ra_hwdec_mapper *mapper, int fmt) +{ + 
struct priv_owner *p_owner = mapper->owner->priv; + for (int n = 0; p_owner->formats && p_owner->formats[n]; n++) { + if (p_owner->formats[n] == fmt) + return true; + } + return false; +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + + mapper->dst_params = mapper->src_params; + + /* + * rpi4_8 and rpi4_10 function identically to NV12. These two pixel + * formats however are not defined in upstream ffmpeg so a string + * comparison is used to identify them instead of a mpv IMGFMT. + */ + const char* fmt_name = mp_imgfmt_to_name(mapper->src_params.hw_subfmt); + if (strcmp(fmt_name, "rpi4_8") == 0 || strcmp(fmt_name, "rpi4_10") == 0) + mapper->dst_params.imgfmt = IMGFMT_NV12; + else + mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params.hw_subfmt = 0; + + struct ra_imgfmt_desc desc = {0}; + + if (mapper->ra->num_formats && + !ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc)) + return -1; + + p->num_planes = desc.num_planes; + mp_image_set_params(&p->layout, &mapper->dst_params); + + if (p_owner->dmabuf_interop.interop_init) + if (!p_owner->dmabuf_interop.interop_init(mapper, &desc)) + return -1; + + if (!check_fmt(mapper, mapper->dst_params.imgfmt)) + { + MP_FATAL(mapper, "unsupported DRM image format %s\n", + mp_imgfmt_to_name(mapper->dst_params.imgfmt)); + return -1; + } + + return 0; +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + + /* + * Although we use the same AVDRMFrameDescriptor to hold the dmabuf + * properties, we additionally need to dup the fds to ensure the + * frame doesn't disappear out from under us. And then for clarity, + * we copy all the individual fields. 
+ */ + const AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)mapper->src->planes[0]; + p->desc.nb_layers = desc->nb_layers; + p->desc.nb_objects = desc->nb_objects; + for (int i = 0; i < desc->nb_layers; i++) { + p->desc.layers[i].format = desc->layers[i].format; + p->desc.layers[i].nb_planes = desc->layers[i].nb_planes; + for (int j = 0; j < desc->layers[i].nb_planes; j++) { + p->desc.layers[i].planes[j].object_index = desc->layers[i].planes[j].object_index; + p->desc.layers[i].planes[j].offset = desc->layers[i].planes[j].offset; + p->desc.layers[i].planes[j].pitch = desc->layers[i].planes[j].pitch; + } + } + for (int i = 0; i < desc->nb_objects; i++) { + p->desc.objects[i].format_modifier = desc->objects[i].format_modifier; + p->desc.objects[i].size = desc->objects[i].size; + // Initialise fds to -1 to make partial failure cleanup easier. + p->desc.objects[i].fd = -1; + } + // Surface is now safe to treat as acquired to allow for unmapping to run. + p->surface_acquired = true; + + // Now actually dup the fds + for (int i = 0; i < desc->nb_objects; i++) { + p->desc.objects[i].fd = fcntl(desc->objects[i].fd, F_DUPFD_CLOEXEC, 0); + if (p->desc.objects[i].fd == -1) { + MP_ERR(mapper, "Failed to duplicate dmabuf fd: %s\n", + mp_strerror(errno)); + goto err; + } + } + + // We can handle composed formats if the total number of planes is still + // equal the number of planes we expect. Complex formats with auxiliary + // planes cannot be supported. + + int num_returned_planes = 0; + for (int i = 0; i < p->desc.nb_layers; i++) { + num_returned_planes += p->desc.layers[i].nb_planes; + } + + if (p->num_planes != 0 && p->num_planes != num_returned_planes) { + MP_ERR(mapper, + "Mapped surface with format '%s' has unexpected number of planes. 
" + "(%d layers and %d planes, but expected %d planes)\n", + mp_imgfmt_to_name(mapper->src->params.hw_subfmt), + p->desc.nb_layers, num_returned_planes, p->num_planes); + goto err; + } + + if (!p_owner->dmabuf_interop.interop_map(mapper, &p_owner->dmabuf_interop, + false)) + goto err; + + return 0; + +err: + mapper_unmap(mapper); + + MP_FATAL(mapper, "mapping DRM dmabuf failed\n"); + return -1; +} + +const struct ra_hwdec_driver ra_hwdec_drmprime = { + .name = "drmprime", + .priv_size = sizeof(struct priv_owner), + .imgfmts = {IMGFMT_DRMPRIME, 0}, + .init = init, + .uninit = uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct dmabuf_interop_priv), + .init = mapper_init, + .uninit = mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; diff --git a/video/out/hwdec/hwdec_drmprime_overlay.c b/video/out/hwdec/hwdec_drmprime_overlay.c new file mode 100644 index 0000000..6b6aae6 --- /dev/null +++ b/video/out/hwdec/hwdec_drmprime_overlay.c @@ -0,0 +1,334 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "video/hwdec.h" +#include "common/msg.h" +#include "options/m_config.h" +#include "libmpv/render_gl.h" +#include "video/out/drm_atomic.h" +#include "video/out/drm_common.h" +#include "video/out/drm_prime.h" +#include "video/out/gpu/hwdec.h" +#include "video/mp_image.h" + +extern const struct m_sub_options drm_conf; + +struct drm_frame { + struct drm_prime_framebuffer fb; + struct mp_image *image; // associated mpv image +}; + +struct priv { + struct mp_log *log; + struct mp_hwdec_ctx hwctx; + + struct mp_image_params params; + + struct drm_atomic_context *ctx; + struct drm_frame current_frame, last_frame, old_frame; + + struct mp_rect src, dst; + + int display_w, display_h; + + struct drm_prime_handle_refs handle_refs; +}; + +static void set_current_frame(struct ra_hwdec *hw, struct drm_frame *frame) +{ + struct priv *p = hw->priv; + + // frame will be on screen after next vsync + // current_frame is currently the displayed frame and will be replaced + // by frame after next vsync. + // We used old frame as triple buffering to make sure that the drm framebuffer + // is not being displayed when we release it. 
+ + if (p->ctx) { + drm_prime_destroy_framebuffer(p->log, p->ctx->fd, &p->old_frame.fb, &p->handle_refs); + } + + mp_image_setrefp(&p->old_frame.image, p->last_frame.image); + p->old_frame.fb = p->last_frame.fb; + + mp_image_setrefp(&p->last_frame.image, p->current_frame.image); + p->last_frame.fb = p->current_frame.fb; + + if (frame) { + p->current_frame.fb = frame->fb; + mp_image_setrefp(&p->current_frame.image, frame->image); + } else { + memset(&p->current_frame.fb, 0, sizeof(p->current_frame.fb)); + mp_image_setrefp(&p->current_frame.image, NULL); + } +} + +static void scale_dst_rect(struct ra_hwdec *hw, int source_w, int source_h ,struct mp_rect *src, struct mp_rect *dst) +{ + struct priv *p = hw->priv; + + // drm can allow to have a layer that has a different size from framebuffer + // we scale here the destination size to video mode + double hratio = p->display_w / (double)source_w; + double vratio = p->display_h / (double)source_h; + double ratio = hratio <= vratio ? hratio : vratio; + + dst->x0 = src->x0 * ratio; + dst->x1 = src->x1 * ratio; + dst->y0 = src->y0 * ratio; + dst->y1 = src->y1 * ratio; + + int offset_x = (p->display_w - ratio * source_w) / 2; + int offset_y = (p->display_h - ratio * source_h) / 2; + + dst->x0 += offset_x; + dst->x1 += offset_x; + dst->y0 += offset_y; + dst->y1 += offset_y; +} + +static void disable_video_plane(struct ra_hwdec *hw) +{ + struct priv *p = hw->priv; + if (!p->ctx) + return; + + if (!p->ctx->drmprime_video_plane) + return; + + // Disabling the drmprime video plane is needed on some devices when using + // the primary plane for video. Primary buffer can't be active with no + // framebuffer associated. So we need this function to commit it right away + // as mpv will free all framebuffers on playback end. 
+ drmModeAtomicReqPtr request = drmModeAtomicAlloc(); + if (request) { + drm_object_set_property(request, p->ctx->drmprime_video_plane, "FB_ID", 0); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_ID", 0); + + int ret = drmModeAtomicCommit(p->ctx->fd, request, + 0, NULL); + + if (ret) + MP_ERR(hw, "Failed to commit disable plane request (code %d)", ret); + drmModeAtomicFree(request); + } +} + +static int overlay_frame(struct ra_hwdec *hw, struct mp_image *hw_image, + struct mp_rect *src, struct mp_rect *dst, bool newframe) +{ + struct priv *p = hw->priv; + AVDRMFrameDescriptor *desc = NULL; + drmModeAtomicReq *request = NULL; + struct drm_frame next_frame = {0}; + int ret; + + struct ra *ra = hw->ra_ctx->ra; + + // grab atomic request from native resources + if (p->ctx) { + struct mpv_opengl_drm_params_v2 *drm_params; + drm_params = (mpv_opengl_drm_params_v2 *)ra_get_native_resource(ra, "drm_params_v2"); + if (!drm_params) { + MP_ERR(hw, "Failed to retrieve drm params from native resources\n"); + return -1; + } + if (drm_params->atomic_request_ptr) { + request = *drm_params->atomic_request_ptr; + } else { + MP_ERR(hw, "drm params pointer to atomic request is invalid\n"); + return -1; + } + } + + if (hw_image) { + + // grab draw plane windowing info to eventually upscale the overlay + // as egl windows could be upscaled to draw plane. 
+ struct mpv_opengl_drm_draw_surface_size *draw_surface_size = ra_get_native_resource(ra, "drm_draw_surface_size"); + if (draw_surface_size) { + scale_dst_rect(hw, draw_surface_size->width, draw_surface_size->height, dst, &p->dst); + } else { + p->dst = *dst; + } + p->src = *src; + + next_frame.image = hw_image; + desc = (AVDRMFrameDescriptor *)hw_image->planes[0]; + + if (desc) { + int srcw = p->src.x1 - p->src.x0; + int srch = p->src.y1 - p->src.y0; + int dstw = MP_ALIGN_UP(p->dst.x1 - p->dst.x0, 2); + int dsth = MP_ALIGN_UP(p->dst.y1 - p->dst.y0, 2); + + if (drm_prime_create_framebuffer(p->log, p->ctx->fd, desc, srcw, srch, &next_frame.fb, &p->handle_refs)) { + ret = -1; + goto fail; + } + + if (request) { + drm_object_set_property(request, p->ctx->drmprime_video_plane, "FB_ID", next_frame.fb.fb_id); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_ID", p->ctx->crtc->id); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "SRC_X", p->src.x0 << 16); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "SRC_Y", p->src.y0 << 16); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "SRC_W", srcw << 16); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "SRC_H", srch << 16); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_X", MP_ALIGN_DOWN(p->dst.x0, 2)); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_Y", MP_ALIGN_DOWN(p->dst.y0, 2)); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_W", dstw); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "CRTC_H", dsth); + drm_object_set_property(request, p->ctx->drmprime_video_plane, "ZPOS", 0); + } else { + ret = drmModeSetPlane(p->ctx->fd, p->ctx->drmprime_video_plane->id, p->ctx->crtc->id, next_frame.fb.fb_id, 0, + MP_ALIGN_DOWN(p->dst.x0, 2), MP_ALIGN_DOWN(p->dst.y0, 2), dstw, dsth, + p->src.x0 << 16, p->src.y0 << 16 , srcw << 16, srch << 16); + if (ret < 0) { + 
MP_ERR(hw, "Failed to set the drmprime video plane %d (buffer %d).\n", + p->ctx->drmprime_video_plane->id, next_frame.fb.fb_id); + goto fail; + } + } + } + } else { + disable_video_plane(hw); + + while (p->old_frame.fb.fb_id) + set_current_frame(hw, NULL); + } + + set_current_frame(hw, &next_frame); + return 0; + + fail: + drm_prime_destroy_framebuffer(p->log, p->ctx->fd, &next_frame.fb, &p->handle_refs); + return ret; +} + +static void uninit(struct ra_hwdec *hw) +{ + struct priv *p = hw->priv; + + disable_video_plane(hw); + set_current_frame(hw, NULL); + + hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); + + if (p->ctx) { + drm_atomic_destroy_context(p->ctx); + p->ctx = NULL; + } +} + +static int init(struct ra_hwdec *hw) +{ + struct priv *p = hw->priv; + int draw_plane, drmprime_video_plane; + + p->log = hw->log; + + void *tmp = talloc_new(NULL); + struct drm_opts *opts = mp_get_config_group(tmp, hw->global, &drm_conf); + draw_plane = opts->draw_plane; + drmprime_video_plane = opts->drmprime_video_plane; + talloc_free(tmp); + + struct mpv_opengl_drm_params_v2 *drm_params; + + drm_params = ra_get_native_resource(hw->ra_ctx->ra, "drm_params_v2"); + if (drm_params) { + p->ctx = drm_atomic_create_context(p->log, drm_params->fd, drm_params->crtc_id, + drm_params->connector_id, draw_plane, drmprime_video_plane); + if (!p->ctx) { + mp_err(p->log, "Failed to retrieve DRM atomic context.\n"); + goto err; + } + if (!p->ctx->drmprime_video_plane) { + mp_warn(p->log, "No drmprime video plane. 
You might need to specify it manually using --drm-drmprime-video-plane\n"); + goto err; + } + } else { + mp_verbose(p->log, "Failed to retrieve DRM fd from native display.\n"); + goto err; + } + + drmModeCrtcPtr crtc; + crtc = drmModeGetCrtc(p->ctx->fd, p->ctx->crtc->id); + if (crtc) { + p->display_w = crtc->mode.hdisplay; + p->display_h = crtc->mode.vdisplay; + drmModeFreeCrtc(crtc); + } + + uint64_t has_prime; + if (drmGetCap(p->ctx->fd, DRM_CAP_PRIME, &has_prime) < 0) { + MP_ERR(hw, "Card does not support prime handles.\n"); + goto err; + } + + if (has_prime) { + drm_prime_init_handle_ref_count(p, &p->handle_refs); + } + + disable_video_plane(hw); + + p->hwctx = (struct mp_hwdec_ctx) { + .driver_name = hw->driver->name, + .hw_imgfmt = IMGFMT_DRMPRIME, + }; + + char *device = drmGetDeviceNameFromFd2(p->ctx->fd); + int ret = av_hwdevice_ctx_create(&p->hwctx.av_device_ref, + AV_HWDEVICE_TYPE_DRM, device, NULL, 0); + + if (device) + free(device); + + if (ret != 0) { + MP_VERBOSE(hw, "Failed to create hwdevice_ctx: %s\n", av_err2str(ret)); + goto err; + } + + hwdec_devices_add(hw->devs, &p->hwctx); + + return 0; + +err: + uninit(hw); + return -1; +} + +const struct ra_hwdec_driver ra_hwdec_drmprime_overlay = { + .name = "drmprime-overlay", + .priv_size = sizeof(struct priv), + .imgfmts = {IMGFMT_DRMPRIME, 0}, + .init = init, + .overlay_frame = overlay_frame, + .uninit = uninit, +}; diff --git a/video/out/hwdec/hwdec_ios_gl.m b/video/out/hwdec/hwdec_ios_gl.m new file mode 100644 index 0000000..633cc3d --- /dev/null +++ b/video/out/hwdec/hwdec_ios_gl.m @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2013 Stefano Pigozzi + * 2017 Aman Gupta + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
// NOTE(review): the original #include lines lost their <...> targets during
// extraction; reconstructed from the symbols used below (CVOpenGLESTexture*,
// EAGLContext, GLES constants) — confirm against upstream mpv 0.37.0.
#include <CoreVideo/CoreVideo.h>

#include <OpenGLES/EAGL.h>
#include <OpenGLES/ES3/gl.h>

#include <libavutil/hwcontext.h>

#include "video/out/gpu/hwdec.h"
#include "video/mp_image_pool.h"
#include "video/out/opengl/ra_gl.h"
#include "hwdec_vt.h"

// Verify this RA is a GLES >= 2.0 context with a current EAGLContext, which
// CVOpenGLESTextureCacheCreateTextureFromImage() requires.
static bool check_hwdec(const struct ra_hwdec *hw)
{
    if (!ra_is_gl(hw->ra_ctx->ra))
        return false;

    GL *gl = ra_gl_get(hw->ra_ctx->ra);
    if (gl->es < 200) {
        MP_ERR(hw, "need OpenGLES 2.0 for CVOpenGLESTextureCacheCreateTextureFromImage()\n");
        return false;
    }

    if ([EAGLContext currentContext] == nil) {
        MP_ERR(hw, "need a current EAGLContext set\n");
        return false;
    }

    return true;
}

// In GLES3 mode, CVOpenGLESTextureCacheCreateTextureFromImage()
// will return error -6683 unless invoked with GL_LUMINANCE and
// GL_LUMINANCE_ALPHA (http://stackoverflow.com/q/36213994/332798)
// If a format tries to use GL_RED/GL_RG instead, try to find a format
// that uses GL_LUMINANCE[_ALPHA] instead.
static const struct ra_format *find_la_variant(struct ra *ra,
                                               const struct ra_format *fmt)
{
    GLint internal_format;
    GLenum format;
    GLenum type;
    ra_gl_get_format(fmt, &internal_format, &format, &type);

    if (format == GL_RED) {
        format = internal_format = GL_LUMINANCE;
    } else if (format == GL_RG) {
        format = internal_format = GL_LUMINANCE_ALPHA;
    } else {
        return fmt;
    }

    // Search the RA's format list for one matching the LUMINANCE triple.
    for (int n = 0; n < ra->num_formats; n++) {
        const struct ra_format *fmt2 = ra->formats[n];
        GLint internal_format2;
        GLenum format2;
        GLenum type2;
        ra_gl_get_format(fmt2, &internal_format2, &format2, &type2);
        if (internal_format2 == internal_format &&
            format2 == format && type2 == type)
            return fmt2;
    }

    return NULL;
}

static int mapper_init(struct ra_hwdec_mapper *mapper)
{
    struct priv *p = mapper->priv;

    for (int n = 0; n < p->desc.num_planes; n++) {
        p->desc.planes[n] = find_la_variant(mapper->ra, p->desc.planes[n]);
        if (!p->desc.planes[n] || p->desc.planes[n]->ctype != RA_CTYPE_UNORM) {
            MP_ERR(mapper, "Format unsupported.\n");
            return -1;
        }
    }

    CVReturn err = CVOpenGLESTextureCacheCreate(
        kCFAllocatorDefault,
        NULL,
        [EAGLContext currentContext],
        NULL,
        &p->gl_texture_cache);

    if (err != noErr) {
        MP_ERR(mapper, "Failure in CVOpenGLESTextureCacheCreate: %d\n", err);
        return -1;
    }

    return 0;
}

static void mapper_unmap(struct ra_hwdec_mapper *mapper)
{
    struct priv *p = mapper->priv;

    for (int i = 0; i < p->desc.num_planes; i++) {
        ra_tex_free(mapper->ra, &mapper->tex[i]);
        if (p->gl_planes[i]) {
            CFRelease(p->gl_planes[i]);
            p->gl_planes[i] = NULL;
        }
    }

    CVOpenGLESTextureCacheFlush(p->gl_texture_cache, 0);
}

// Wrap each CVPixelBuffer plane as a GL texture via the texture cache and
// expose it as an ra_tex.
static int mapper_map(struct ra_hwdec_mapper *mapper)
{
    struct priv *p = mapper->priv;
    GL *gl = ra_gl_get(mapper->ra);

    CVPixelBufferRelease(p->pbuf);
    p->pbuf = (CVPixelBufferRef)mapper->src->planes[3];
    CVPixelBufferRetain(p->pbuf);

    const bool planar = CVPixelBufferIsPlanar(p->pbuf);
    const int planes = CVPixelBufferGetPlaneCount(p->pbuf);
    assert((planar && planes == p->desc.num_planes) || p->desc.num_planes == 1);

    for (int i = 0; i < p->desc.num_planes; i++) {
        const struct ra_format *fmt = p->desc.planes[i];

        GLint internal_format;
        GLenum format;
        GLenum type;
        ra_gl_get_format(fmt, &internal_format, &format, &type);

        CVReturn err = CVOpenGLESTextureCacheCreateTextureFromImage(
            kCFAllocatorDefault,
            p->gl_texture_cache,
            p->pbuf,
            NULL,
            GL_TEXTURE_2D,
            internal_format,
            CVPixelBufferGetWidthOfPlane(p->pbuf, i),
            CVPixelBufferGetHeightOfPlane(p->pbuf, i),
            format,
            type,
            i,
            &p->gl_planes[i]);

        if (err != noErr) {
            MP_ERR(mapper, "error creating texture for plane %d: %d\n", i, err);
            return -1;
        }

        gl->BindTexture(GL_TEXTURE_2D, CVOpenGLESTextureGetName(p->gl_planes[i]));
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        gl->BindTexture(GL_TEXTURE_2D, 0);

        struct ra_tex_params params = {
            .dimensions = 2,
            .w = CVPixelBufferGetWidthOfPlane(p->pbuf, i),
            .h = CVPixelBufferGetHeightOfPlane(p->pbuf, i),
            .d = 1,
            .format = fmt,
            .render_src = true,
            .src_linear = true,
        };

        mapper->tex[i] = ra_create_wrapped_tex(
            mapper->ra,
            &params,
            CVOpenGLESTextureGetName(p->gl_planes[i])
        );
        if (!mapper->tex[i])
            return -1;
    }

    return 0;
}

static void mapper_uninit(struct ra_hwdec_mapper *mapper)
{
    struct priv *p = mapper->priv;

    CVPixelBufferRelease(p->pbuf);
    if (p->gl_texture_cache) {
        CFRelease(p->gl_texture_cache);
        p->gl_texture_cache = NULL;
    }
}

// Install the GLES interop callbacks into the shared VideoToolbox owner priv.
bool vt_gl_init(const struct ra_hwdec *hw)
{
    struct priv_owner *p = hw->priv;

    if (!check_hwdec(hw))
        return false;

    p->interop_init = mapper_init;
    p->interop_uninit = mapper_uninit;
    p->interop_map = mapper_map;
    p->interop_unmap = mapper_unmap;

    return true;
}
+ */ + +#include + +#include +#include +#include +#include + +#include + +#include "video/mp_image_pool.h" +#include "video/out/gpu/hwdec.h" +#include "video/out/opengl/ra_gl.h" +#include "hwdec_vt.h" + +static bool check_hwdec(const struct ra_hwdec *hw) +{ + if (!ra_is_gl(hw->ra_ctx->ra)) + return false; + + GL *gl = ra_gl_get(hw->ra_ctx->ra); + if (gl->version < 300) { + MP_ERR(hw, "need >= OpenGL 3.0 for core rectangle texture support\n"); + return false; + } + + if (!CGLGetCurrentContext()) { + MP_ERR(hw, "need cocoa opengl backend to be active"); + return false; + } + + return true; +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + GL *gl = ra_gl_get(mapper->ra); + + gl->GenTextures(MP_MAX_PLANES, p->gl_planes); + + for (int n = 0; n < p->desc.num_planes; n++) { + if (p->desc.planes[n]->ctype != RA_CTYPE_UNORM) { + MP_ERR(mapper, "Format unsupported.\n"); + return -1; + } + } + + return 0; +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + + // Is this sane? No idea how to release the texture without deleting it. 
+ CVPixelBufferRelease(p->pbuf); + p->pbuf = NULL; + + for (int i = 0; i < p->desc.num_planes; i++) + ra_tex_free(mapper->ra, &mapper->tex[i]); +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + GL *gl = ra_gl_get(mapper->ra); + + CVPixelBufferRelease(p->pbuf); + p->pbuf = (CVPixelBufferRef)mapper->src->planes[3]; + CVPixelBufferRetain(p->pbuf); + IOSurfaceRef surface = CVPixelBufferGetIOSurface(p->pbuf); + if (!surface) { + MP_ERR(mapper, "CVPixelBuffer has no IOSurface\n"); + return -1; + } + + const bool planar = CVPixelBufferIsPlanar(p->pbuf); + const int planes = CVPixelBufferGetPlaneCount(p->pbuf); + assert((planar && planes == p->desc.num_planes) || p->desc.num_planes == 1); + + GLenum gl_target = GL_TEXTURE_RECTANGLE; + + for (int i = 0; i < p->desc.num_planes; i++) { + const struct ra_format *fmt = p->desc.planes[i]; + + GLint internal_format; + GLenum format; + GLenum type; + ra_gl_get_format(fmt, &internal_format, &format, &type); + + gl->BindTexture(gl_target, p->gl_planes[i]); + + CGLError err = CGLTexImageIOSurface2D( + CGLGetCurrentContext(), gl_target, + internal_format, + IOSurfaceGetWidthOfPlane(surface, i), + IOSurfaceGetHeightOfPlane(surface, i), + format, type, surface, i); + + gl->BindTexture(gl_target, 0); + + if (err != kCGLNoError) { + MP_ERR(mapper, + "error creating IOSurface texture for plane %d: %s (%x)\n", + i, CGLErrorString(err), gl->GetError()); + return -1; + } + + struct ra_tex_params params = { + .dimensions = 2, + .w = IOSurfaceGetWidthOfPlane(surface, i), + .h = IOSurfaceGetHeightOfPlane(surface, i), + .d = 1, + .format = fmt, + .render_src = true, + .src_linear = true, + .non_normalized = gl_target == GL_TEXTURE_RECTANGLE, + }; + + mapper->tex[i] = ra_create_wrapped_tex(mapper->ra, ¶ms, + p->gl_planes[i]); + if (!mapper->tex[i]) + return -1; + } + + return 0; +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + GL *gl = 
ra_gl_get(mapper->ra); + + gl->DeleteTextures(MP_MAX_PLANES, p->gl_planes); +} + +bool vt_gl_init(const struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + if (!check_hwdec(hw)) + return false; + + p->interop_init = mapper_init; + p->interop_uninit = mapper_uninit; + p->interop_map = mapper_map; + p->interop_unmap = mapper_unmap; + + return true; +} diff --git a/video/out/hwdec/hwdec_vaapi.c b/video/out/hwdec/hwdec_vaapi.c new file mode 100644 index 0000000..d8a4517 --- /dev/null +++ b/video/out/hwdec/hwdec_vaapi.c @@ -0,0 +1,557 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "config.h" + +#include "video/out/gpu/hwdec.h" +#include "video/out/hwdec/dmabuf_interop.h" +#include "video/fmt-conversion.h" +#include "video/mp_image_pool.h" +#include "video/vaapi.h" + +#if HAVE_VAAPI_DRM +#include "libmpv/render_gl.h" +#endif + +#if HAVE_VAAPI_X11 +#include + +static VADisplay *create_x11_va_display(struct ra *ra) +{ + Display *x11 = ra_get_native_resource(ra, "x11"); + return x11 ? vaGetDisplay(x11) : NULL; +} +#endif + +#if HAVE_VAAPI_WAYLAND +#include + +static VADisplay *create_wayland_va_display(struct ra *ra) +{ + struct wl_display *wl = ra_get_native_resource(ra, "wl"); + + return wl ? 
vaGetDisplayWl(wl) : NULL; +} +#endif + +#if HAVE_VAAPI_DRM +#include + +static VADisplay *create_drm_va_display(struct ra *ra) +{ + mpv_opengl_drm_params_v2 *params = ra_get_native_resource(ra, "drm_params_v2"); + if (!params || params->render_fd == -1) + return NULL; + + return vaGetDisplayDRM(params->render_fd); +} +#endif + +struct va_create_native { + const char *name; + VADisplay *(*create)(struct ra *ra); +}; + +static const struct va_create_native create_native_cbs[] = { +#if HAVE_VAAPI_X11 + {"x11", create_x11_va_display}, +#endif +#if HAVE_VAAPI_WAYLAND + {"wayland", create_wayland_va_display}, +#endif +#if HAVE_VAAPI_DRM + {"drm", create_drm_va_display}, +#endif +}; + +static VADisplay *create_native_va_display(struct ra *ra, struct mp_log *log) +{ + for (int n = 0; n < MP_ARRAY_SIZE(create_native_cbs); n++) { + const struct va_create_native *disp = &create_native_cbs[n]; + mp_verbose(log, "Trying to open a %s VA display...\n", disp->name); + VADisplay *display = disp->create(ra); + if (display) + return display; + } + return NULL; +} + +static void determine_working_formats(struct ra_hwdec *hw); + +struct priv_owner { + struct mp_vaapi_ctx *ctx; + VADisplay *display; + int *formats; + bool probing_formats; // temporary during init + + struct dmabuf_interop dmabuf_interop; +}; + +static void uninit(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + if (p->ctx) { + hwdec_devices_remove(hw->devs, &p->ctx->hwctx); + if (p->ctx->hwctx.conversion_config) { + AVVAAPIHWConfig *hwconfig = p->ctx->hwctx.conversion_config; + vaDestroyConfig(p->ctx->display, hwconfig->config_id); + av_freep(&p->ctx->hwctx.conversion_config); + } + } + va_destroy(p->ctx); +} + +const static dmabuf_interop_init interop_inits[] = { +#if HAVE_DMABUF_INTEROP_GL + dmabuf_interop_gl_init, +#endif + dmabuf_interop_pl_init, +#if HAVE_DMABUF_WAYLAND + dmabuf_interop_wl_init, +#endif + NULL +}; + +static int init(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + VAStatus 
vas; + + for (int i = 0; interop_inits[i]; i++) { + if (interop_inits[i](hw, &p->dmabuf_interop)) { + break; + } + } + + if (!p->dmabuf_interop.interop_map || !p->dmabuf_interop.interop_unmap) { + MP_VERBOSE(hw, "VAAPI hwdec only works with OpenGL or Vulkan backends.\n"); + return -1; + } + + p->display = create_native_va_display(hw->ra_ctx->ra, hw->log); + if (!p->display) { + MP_VERBOSE(hw, "Could not create a VA display.\n"); + return -1; + } + + p->ctx = va_initialize(p->display, hw->log, true); + if (!p->ctx) { + vaTerminate(p->display); + return -1; + } + if (!p->ctx->av_device_ref) { + MP_VERBOSE(hw, "libavutil vaapi code rejected the driver?\n"); + return -1; + } + + if (hw->probing && va_guess_if_emulated(p->ctx)) { + return -1; + } + + determine_working_formats(hw); + if (!p->formats || !p->formats[0]) { + return -1; + } + + VAConfigID config_id; + AVVAAPIHWConfig *hwconfig = NULL; + vas = vaCreateConfig(p->display, VAProfileNone, VAEntrypointVideoProc, NULL, + 0, &config_id); + if (vas == VA_STATUS_SUCCESS) { + hwconfig = av_hwdevice_hwconfig_alloc(p->ctx->av_device_ref); + hwconfig->config_id = config_id; + } + + // it's now safe to set the display resource + ra_add_native_resource(hw->ra_ctx->ra, "VADisplay", p->display); + + p->ctx->hwctx.hw_imgfmt = IMGFMT_VAAPI; + p->ctx->hwctx.supported_formats = p->formats; + p->ctx->hwctx.driver_name = hw->driver->name; + p->ctx->hwctx.conversion_filter_name = "scale_vaapi"; + p->ctx->hwctx.conversion_config = hwconfig; + hwdec_devices_add(hw->devs, &p->ctx->hwctx); + return 0; +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + + p_owner->dmabuf_interop.interop_unmap(mapper); + + if (p->surface_acquired) { + for (int n = 0; n < p->desc.nb_objects; n++) + close(p->desc.objects[n].fd); + p->surface_acquired = false; + } +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct 
priv_owner *p_owner = mapper->owner->priv; + if (p_owner->dmabuf_interop.interop_uninit) { + p_owner->dmabuf_interop.interop_uninit(mapper); + } +} + +static bool check_fmt(struct ra_hwdec_mapper *mapper, int fmt) +{ + struct priv_owner *p_owner = mapper->owner->priv; + for (int n = 0; p_owner->formats && p_owner->formats[n]; n++) { + if (p_owner->formats[n] == fmt) + return true; + } + return false; +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params.hw_subfmt = 0; + + struct ra_imgfmt_desc desc = {0}; + + if (mapper->ra->num_formats && + !ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc)) + return -1; + + p->num_planes = desc.num_planes; + mp_image_set_params(&p->layout, &mapper->dst_params); + + if (p_owner->dmabuf_interop.interop_init) + if (!p_owner->dmabuf_interop.interop_init(mapper, &desc)) + return -1; + + if (!p_owner->probing_formats && !check_fmt(mapper, mapper->dst_params.imgfmt)) + { + MP_FATAL(mapper, "unsupported VA image format %s\n", + mp_imgfmt_to_name(mapper->dst_params.imgfmt)); + return -1; + } + + return 0; +} + +static void close_file_descriptors(VADRMPRIMESurfaceDescriptor desc) +{ + for (int i = 0; i < desc.num_objects; i++) + close(desc.objects[i].fd); +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct dmabuf_interop_priv *p = mapper->priv; + VAStatus status; + VADisplay *display = p_owner->display; + VADRMPRIMESurfaceDescriptor desc = {0}; + + uint32_t flags = p_owner->dmabuf_interop.composed_layers ? 
+ VA_EXPORT_SURFACE_COMPOSED_LAYERS : VA_EXPORT_SURFACE_SEPARATE_LAYERS; + status = vaExportSurfaceHandle(display, va_surface_id(mapper->src), + VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, + VA_EXPORT_SURFACE_READ_ONLY | + flags, + &desc); + if (!CHECK_VA_STATUS_LEVEL(mapper, "vaExportSurfaceHandle()", + p_owner->probing_formats ? MSGL_DEBUG : MSGL_ERR)) + { + close_file_descriptors(desc); + goto err; + } + vaSyncSurface(display, va_surface_id(mapper->src)); + // No need to error out if sync fails, but good to know if it did. + CHECK_VA_STATUS(mapper, "vaSyncSurface()"); + p->surface_acquired = true; + + // We use AVDRMFrameDescriptor to store the dmabuf so we need to copy the + // values over. + int num_returned_planes = 0; + p->desc.nb_layers = desc.num_layers; + p->desc.nb_objects = desc.num_objects; + for (int i = 0; i < desc.num_layers; i++) { + p->desc.layers[i].format = desc.layers[i].drm_format; + p->desc.layers[i].nb_planes = desc.layers[i].num_planes; + for (int j = 0; j < desc.layers[i].num_planes; j++) + { + p->desc.layers[i].planes[j].object_index = desc.layers[i].object_index[j]; + p->desc.layers[i].planes[j].offset = desc.layers[i].offset[j]; + p->desc.layers[i].planes[j].pitch = desc.layers[i].pitch[j]; + } + + num_returned_planes += desc.layers[i].num_planes; + } + for (int i = 0; i < desc.num_objects; i++) { + p->desc.objects[i].format_modifier = desc.objects[i].drm_format_modifier; + p->desc.objects[i].fd = desc.objects[i].fd; + p->desc.objects[i].size = desc.objects[i].size; + } + + // We can handle composed formats if the total number of planes is still + // equal the number of planes we expect. Complex formats with auxiliary + // planes cannot be supported. + if (p->num_planes != 0 && p->num_planes != num_returned_planes) { + mp_msg(mapper->log, p_owner->probing_formats ? MSGL_DEBUG : MSGL_ERR, + "Mapped surface with format '%s' has unexpected number of planes. 
" + "(%d layers and %d planes, but expected %d planes)\n", + mp_imgfmt_to_name(mapper->src->params.hw_subfmt), + desc.num_layers, num_returned_planes, p->num_planes); + goto err; + } + + if (!p_owner->dmabuf_interop.interop_map(mapper, &p_owner->dmabuf_interop, + p_owner->probing_formats)) + goto err; + + if (desc.fourcc == VA_FOURCC_YV12) + MPSWAP(struct ra_tex*, mapper->tex[1], mapper->tex[2]); + + return 0; + +err: + mapper_unmap(mapper); + + if (!p_owner->probing_formats) + MP_FATAL(mapper, "mapping VAAPI EGL image failed\n"); + return -1; +} + +static bool try_format_map(struct ra_hwdec *hw, struct mp_image *surface) +{ + struct ra_hwdec_mapper *mapper = ra_hwdec_mapper_create(hw, &surface->params); + if (!mapper) { + MP_DBG(hw, "Failed to create mapper\n"); + return false; + } + + bool ok = ra_hwdec_mapper_map(mapper, surface) >= 0; + ra_hwdec_mapper_free(&mapper); + return ok; +} + +static void try_format_pixfmt(struct ra_hwdec *hw, enum AVPixelFormat pixfmt) +{ + bool supported = false; + struct priv_owner *p = hw->priv; + + int mp_fmt = pixfmt2imgfmt(pixfmt); + if (!mp_fmt) + return; + + int num_formats = 0; + for (int n = 0; p->formats && p->formats[n]; n++) { + if (p->formats[n] == mp_fmt) + return; // already added + num_formats += 1; + } + + AVBufferRef *fref = NULL; + struct mp_image *s = NULL; + AVFrame *frame = NULL; + fref = av_hwframe_ctx_alloc(p->ctx->av_device_ref); + if (!fref) + goto err; + AVHWFramesContext *fctx = (void *)fref->data; + fctx->format = AV_PIX_FMT_VAAPI; + fctx->sw_format = pixfmt; + fctx->width = 128; + fctx->height = 128; + if (av_hwframe_ctx_init(fref) < 0) + goto err; + frame = av_frame_alloc(); + if (!frame) + goto err; + if (av_hwframe_get_buffer(fref, frame, 0) < 0) + goto err; + s = mp_image_from_av_frame(frame); + if (!s || !mp_image_params_valid(&s->params)) + goto err; + if (try_format_map(hw, s)) { + supported = true; + MP_TARRAY_APPEND(p, p->formats, num_formats, mp_fmt); + MP_TARRAY_APPEND(p, p->formats, 
num_formats, 0); // terminate it + } +err: + if (!supported) + MP_DBG(hw, "Unsupported format: %s\n", + mp_imgfmt_to_name(mp_fmt)); + + talloc_free(s); + av_frame_free(&frame); + av_buffer_unref(&fref); +} + +static void try_format_config(struct ra_hwdec *hw, AVVAAPIHWConfig *hwconfig) +{ + struct priv_owner *p = hw->priv; + enum AVPixelFormat *fmts = NULL; + + AVHWFramesConstraints *fc = + av_hwdevice_get_hwframe_constraints(p->ctx->av_device_ref, hwconfig); + if (!fc) { + MP_WARN(hw, "failed to retrieve libavutil frame constraints\n"); + return; + } + + /* + * We need a hwframe_ctx to be able to get the valid formats, but to + * initialise it, we need a format, so we get the first format from the + * hwconfig. We don't care about the other formats in the config because the + * transfer formats list will already include them. + */ + AVBufferRef *fref = NULL; + fref = av_hwframe_ctx_alloc(p->ctx->av_device_ref); + if (!fref) { + MP_WARN(hw, "failed to alloc libavutil frame context\n"); + goto err; + } + AVHWFramesContext *fctx = (void *)fref->data; + fctx->format = AV_PIX_FMT_VAAPI; + fctx->sw_format = fc->valid_sw_formats[0]; + fctx->width = 128; + fctx->height = 128; + if (av_hwframe_ctx_init(fref) < 0) { + MP_WARN(hw, "failed to init libavutil frame context\n"); + goto err; + } + + int ret = av_hwframe_transfer_get_formats(fref, AV_HWFRAME_TRANSFER_DIRECTION_TO, &fmts, 0); + if (ret) { + MP_WARN(hw, "failed to get libavutil frame context supported formats\n"); + goto err; + } + + for (int n = 0; fmts && + fmts[n] != AV_PIX_FMT_NONE; n++) + try_format_pixfmt(hw, fmts[n]); + +err: + av_hwframe_constraints_free(&fc); + av_buffer_unref(&fref); + av_free(fmts); +} + +static void determine_working_formats(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + VAStatus status; + VAProfile *profiles = NULL; + VAEntrypoint *entrypoints = NULL; + + MP_VERBOSE(hw, "Going to probe surface formats (may log bogus errors)...\n"); + p->probing_formats = true; + + 
AVVAAPIHWConfig *hwconfig = av_hwdevice_hwconfig_alloc(p->ctx->av_device_ref); + if (!hwconfig) { + MP_WARN(hw, "Could not allocate FFmpeg AVVAAPIHWConfig\n"); + goto done; + } + + profiles = talloc_zero_array(NULL, VAProfile, vaMaxNumProfiles(p->display)); + entrypoints = talloc_zero_array(NULL, VAEntrypoint, + vaMaxNumEntrypoints(p->display)); + int num_profiles = 0; + status = vaQueryConfigProfiles(p->display, profiles, &num_profiles); + if (!CHECK_VA_STATUS(hw, "vaQueryConfigProfiles()")) + num_profiles = 0; + + /* + * We need to find one declared format to bootstrap probing. So find a valid + * decoding profile and use its config. If try_format_config() finds any + * formats, they will be all the supported formats, and we don't need to + * look at any other profiles. + */ + for (int n = 0; n < num_profiles; n++) { + VAProfile profile = profiles[n]; + if (profile == VAProfileNone) { + // We don't use the None profile. + continue; + } + int num_ep = 0; + status = vaQueryConfigEntrypoints(p->display, profile, entrypoints, + &num_ep); + if (status != VA_STATUS_SUCCESS) { + MP_DBG(hw, "vaQueryConfigEntrypoints(): '%s' for profile %d", + vaErrorStr(status), (int)profile); + continue; + } + for (int ep = 0; ep < num_ep; ep++) { + if (entrypoints[ep] != VAEntrypointVLD) { + // We are only interested in decoding entrypoints. 
+ continue; + } + VAConfigID config = VA_INVALID_ID; + status = vaCreateConfig(p->display, profile, entrypoints[ep], + NULL, 0, &config); + if (status != VA_STATUS_SUCCESS) { + MP_DBG(hw, "vaCreateConfig(): '%s' for profile %d", + vaErrorStr(status), (int)profile); + continue; + } + + hwconfig->config_id = config; + try_format_config(hw, hwconfig); + + vaDestroyConfig(p->display, config); + if (p->formats && p->formats[0]) { + goto done; + } + } + } + +done: + av_free(hwconfig); + talloc_free(profiles); + talloc_free(entrypoints); + + p->probing_formats = false; + + MP_DBG(hw, "Supported formats:\n"); + for (int n = 0; p->formats && p->formats[n]; n++) + MP_DBG(hw, " %s\n", mp_imgfmt_to_name(p->formats[n])); + MP_VERBOSE(hw, "Done probing surface formats.\n"); +} + +const struct ra_hwdec_driver ra_hwdec_vaapi = { + .name = "vaapi", + .priv_size = sizeof(struct priv_owner), + .imgfmts = {IMGFMT_VAAPI, 0}, + .init = init, + .uninit = uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct dmabuf_interop_priv), + .init = mapper_init, + .uninit = mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; diff --git a/video/out/hwdec/hwdec_vt.c b/video/out/hwdec/hwdec_vt.c new file mode 100644 index 0000000..ab41d02 --- /dev/null +++ b/video/out/hwdec/hwdec_vt.c @@ -0,0 +1,141 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include "config.h" + +#include "video/out/gpu/hwdec.h" +#include "video/out/hwdec/hwdec_vt.h" + +static void uninit(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); +} + +const static vt_interop_init interop_inits[] = { +#if HAVE_VIDEOTOOLBOX_GL || HAVE_IOS_GL + vt_gl_init, +#endif +#if HAVE_VIDEOTOOLBOX_PL + vt_pl_init, +#endif + NULL +}; + +static int init(struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + for (int i = 0; interop_inits[i]; i++) { + if (interop_inits[i](hw)) { + break; + } + } + + if (!p->interop_map || !p->interop_unmap) { + MP_VERBOSE(hw, "VT hwdec only works with OpenGL or Vulkan backends.\n"); + return -1; + } + + p->hwctx = (struct mp_hwdec_ctx){ + .driver_name = hw->driver->name, + .hw_imgfmt = IMGFMT_VIDEOTOOLBOX, + }; + + int ret = av_hwdevice_ctx_create(&p->hwctx.av_device_ref, + AV_HWDEVICE_TYPE_VIDEOTOOLBOX, NULL, NULL, 0); + if (ret != 0) { + MP_VERBOSE(hw, "Failed to create hwdevice_ctx: %s\n", av_err2str(ret)); + return -1; + } + + hwdec_devices_add(hw->devs, &p->hwctx); + + return 0; +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + + p_owner->interop_unmap(mapper); +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + if (p_owner->interop_uninit) { + p_owner->interop_uninit(mapper); + } +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + struct priv *p = mapper->priv; + + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params.hw_subfmt = 0; + + if (!mapper->dst_params.imgfmt) { + MP_ERR(mapper, "Unsupported CVPixelBuffer format.\n"); + return -1; + } + + if 
(!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &p->desc)) { + MP_ERR(mapper, "Unsupported texture format.\n"); + return -1; + } + + if (p_owner->interop_init) + return p_owner->interop_init(mapper); + + return 0; +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv_owner *p_owner = mapper->owner->priv; + + return p_owner->interop_map(mapper); +} + +const struct ra_hwdec_driver ra_hwdec_videotoolbox = { + .name = "videotoolbox", + .priv_size = sizeof(struct priv_owner), + .imgfmts = {IMGFMT_VIDEOTOOLBOX, 0}, + .init = init, + .uninit = uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct priv), + .init = mapper_init, + .uninit = mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; diff --git a/video/out/hwdec/hwdec_vt.h b/video/out/hwdec/hwdec_vt.h new file mode 100644 index 0000000..b79c641 --- /dev/null +++ b/video/out/hwdec/hwdec_vt.h @@ -0,0 +1,63 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . 
+ */ + +#pragma once + +#include + +#include "config.h" +#include "video/out/gpu/hwdec.h" + +struct priv_owner { + struct mp_hwdec_ctx hwctx; + + int (*interop_init)(struct ra_hwdec_mapper *mapper); + void (*interop_uninit)(struct ra_hwdec_mapper *mapper); + + int (*interop_map)(struct ra_hwdec_mapper *mapper); + void (*interop_unmap)(struct ra_hwdec_mapper *mapper); +}; + +#ifndef __OBJC__ +typedef struct __CVMetalTextureCache *CVMetalTextureCacheRef; +typedef CVImageBufferRef CVMetalTextureRef; +#endif + +struct priv { + void *interop_mapper_priv; + + CVPixelBufferRef pbuf; + +#if HAVE_VIDEOTOOLBOX_GL + GLuint gl_planes[MP_MAX_PLANES]; +#elif HAVE_IOS_GL + CVOpenGLESTextureCacheRef gl_texture_cache; + CVOpenGLESTextureRef gl_planes[MP_MAX_PLANES]; +#endif + +#if HAVE_VIDEOTOOLBOX_PL + CVMetalTextureCacheRef mtl_texture_cache; + CVMetalTextureRef mtl_planes[MP_MAX_PLANES]; +#endif + + struct ra_imgfmt_desc desc; +}; + +typedef bool (*vt_interop_init)(const struct ra_hwdec *hw); + +bool vt_gl_init(const struct ra_hwdec *hw); +bool vt_pl_init(const struct ra_hwdec *hw); diff --git a/video/out/hwdec/hwdec_vt_pl.m b/video/out/hwdec/hwdec_vt_pl.m new file mode 100644 index 0000000..cd133a0 --- /dev/null +++ b/video/out/hwdec/hwdec_vt_pl.m @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2013 Stefano Pigozzi + * 2017 Aman Gupta + * 2023 rcombs + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. 
If not, see . + */ + +#include + +#include +#include + +#include + +#include + +#include "config.h" + +#include "video/out/gpu/hwdec.h" +#include "video/out/placebo/ra_pl.h" +#include "video/mp_image_pool.h" + +#if HAVE_VULKAN +#include "video/out/vulkan/common.h" +#endif + +#include "hwdec_vt.h" + +static bool check_hwdec(const struct ra_hwdec *hw) +{ + pl_gpu gpu = ra_pl_get(hw->ra_ctx->ra); + if (!gpu) { + // This is not a libplacebo RA; + return false; + } + + if (!(gpu->import_caps.tex & PL_HANDLE_MTL_TEX)) { + MP_VERBOSE(hw, "VideoToolbox libplacebo interop requires support for " + "PL_HANDLE_MTL_TEX import.\n"); + return false; + } + + return true; +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params.hw_subfmt = 0; + + if (!mapper->dst_params.imgfmt) { + MP_ERR(mapper, "Unsupported CVPixelBuffer format.\n"); + return -1; + } + + if (!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &p->desc)) { + MP_ERR(mapper, "Unsupported texture format.\n"); + return -1; + } + + for (int n = 0; n < p->desc.num_planes; n++) { + if (!p->desc.planes[n] || p->desc.planes[n]->ctype != RA_CTYPE_UNORM) { + MP_ERR(mapper, "Format unsupported.\n"); + return -1; + } + } + + id mtl_device = nil; + +#ifdef VK_EXT_METAL_OBJECTS_SPEC_VERSION + pl_gpu gpu = ra_pl_get(mapper->ra); + if (gpu) { + pl_vulkan vulkan = pl_vulkan_get(gpu); + if (vulkan && vulkan->device && vulkan->instance && vulkan->get_proc_addr) { + PFN_vkExportMetalObjectsEXT pExportMetalObjects = (PFN_vkExportMetalObjectsEXT)vulkan->get_proc_addr(vulkan->instance, "vkExportMetalObjectsEXT"); + if (pExportMetalObjects) { + VkExportMetalDeviceInfoEXT device_info = { + .sType = VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT, + .pNext = NULL, + .mtlDevice = nil, + }; + + VkExportMetalObjectsInfoEXT objects_info = { + .sType = 
VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT, + .pNext = &device_info, + }; + + pExportMetalObjects(vulkan->device, &objects_info); + + mtl_device = device_info.mtlDevice; + [mtl_device retain]; + } + } + } +#endif + + if (!mtl_device) { + mtl_device = MTLCreateSystemDefaultDevice(); + } + + CVReturn err = CVMetalTextureCacheCreate( + kCFAllocatorDefault, + NULL, + mtl_device, + NULL, + &p->mtl_texture_cache); + + [mtl_device release]; + + if (err != noErr) { + MP_ERR(mapper, "Failure in CVOpenGLESTextureCacheCreate: %d\n", err); + return -1; + } + + return 0; +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + + for (int i = 0; i < p->desc.num_planes; i++) { + ra_tex_free(mapper->ra, &mapper->tex[i]); + if (p->mtl_planes[i]) { + CFRelease(p->mtl_planes[i]); + p->mtl_planes[i] = NULL; + } + } + + CVMetalTextureCacheFlush(p->mtl_texture_cache, 0); +} + +static const struct { + const char *glsl; + MTLPixelFormat mtl; +} mtl_fmts[] = { + {"r16f", MTLPixelFormatR16Float }, + {"r32f", MTLPixelFormatR32Float }, + {"rg16f", MTLPixelFormatRG16Float }, + {"rg32f", MTLPixelFormatRG32Float }, + {"rgba16f", MTLPixelFormatRGBA16Float }, + {"rgba32f", MTLPixelFormatRGBA32Float }, + {"r11f_g11f_b10f", MTLPixelFormatRG11B10Float }, + + {"r8", MTLPixelFormatR8Unorm }, + {"r16", MTLPixelFormatR16Unorm }, + {"rg8", MTLPixelFormatRG8Unorm }, + {"rg16", MTLPixelFormatRG16Unorm }, + {"rgba8", MTLPixelFormatRGBA8Unorm }, + {"rgba16", MTLPixelFormatRGBA16Unorm }, + {"rgb10_a2", MTLPixelFormatRGB10A2Unorm }, + + {"r8_snorm", MTLPixelFormatR8Snorm }, + {"r16_snorm", MTLPixelFormatR16Snorm }, + {"rg8_snorm", MTLPixelFormatRG8Snorm }, + {"rg16_snorm", MTLPixelFormatRG16Snorm }, + {"rgba8_snorm", MTLPixelFormatRGBA8Snorm }, + {"rgba16_snorm", MTLPixelFormatRGBA16Snorm }, + + {"r8ui", MTLPixelFormatR8Uint }, + {"r16ui", MTLPixelFormatR16Uint }, + {"r32ui", MTLPixelFormatR32Uint }, + {"rg8ui", MTLPixelFormatRG8Uint }, + {"rg16ui", 
MTLPixelFormatRG16Uint }, + {"rg32ui", MTLPixelFormatRG32Uint }, + {"rgba8ui", MTLPixelFormatRGBA8Uint }, + {"rgba16ui", MTLPixelFormatRGBA16Uint }, + {"rgba32ui", MTLPixelFormatRGBA32Uint }, + {"rgb10_a2ui", MTLPixelFormatRGB10A2Uint }, + + {"r8i", MTLPixelFormatR8Sint }, + {"r16i", MTLPixelFormatR16Sint }, + {"r32i", MTLPixelFormatR32Sint }, + {"rg8i", MTLPixelFormatRG8Sint }, + {"rg16i", MTLPixelFormatRG16Sint }, + {"rg32i", MTLPixelFormatRG32Sint }, + {"rgba8i", MTLPixelFormatRGBA8Sint }, + {"rgba16i", MTLPixelFormatRGBA16Sint }, + {"rgba32i", MTLPixelFormatRGBA32Sint }, + + { NULL, MTLPixelFormatInvalid }, +}; + +static MTLPixelFormat get_mtl_fmt(const char* glsl) +{ + if (!glsl) + return MTLPixelFormatInvalid; + + for (int i = 0; mtl_fmts[i].glsl; i++) { + if (!strcmp(glsl, mtl_fmts[i].glsl)) + return mtl_fmts[i].mtl; + } + + return MTLPixelFormatInvalid; +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + pl_gpu gpu = ra_pl_get(mapper->owner->ra_ctx->ra); + + CVPixelBufferRelease(p->pbuf); + p->pbuf = (CVPixelBufferRef)mapper->src->planes[3]; + CVPixelBufferRetain(p->pbuf); + + const bool planar = CVPixelBufferIsPlanar(p->pbuf); + const int planes = CVPixelBufferGetPlaneCount(p->pbuf); + assert((planar && planes == p->desc.num_planes) || p->desc.num_planes == 1); + + for (int i = 0; i < p->desc.num_planes; i++) { + const struct ra_format *fmt = p->desc.planes[i]; + + pl_fmt plfmt = ra_pl_fmt_get(fmt); + MTLPixelFormat format = get_mtl_fmt(plfmt->glsl_format); + + if (!format) { + MP_ERR(mapper, "Format unsupported.\n"); + return -1; + } + + size_t width = CVPixelBufferGetWidthOfPlane(p->pbuf, i), + height = CVPixelBufferGetHeightOfPlane(p->pbuf, i); + + CVReturn err = CVMetalTextureCacheCreateTextureFromImage( + kCFAllocatorDefault, + p->mtl_texture_cache, + p->pbuf, + NULL, + format, + width, + height, + i, + &p->mtl_planes[i]); + + if (err != noErr) { + MP_ERR(mapper, "error creating texture for plane %d: 
%d\n", i, err); + return -1; + } + + struct pl_tex_params tex_params = { + .w = width, + .h = height, + .d = 0, + .format = plfmt, + .sampleable = true, + .import_handle = PL_HANDLE_MTL_TEX, + .shared_mem = (struct pl_shared_mem) { + .handle = { + .handle = CVMetalTextureGetTexture(p->mtl_planes[i]), + }, + }, + }; + + pl_tex pltex = pl_tex_create(gpu, &tex_params); + if (!pltex) + return -1; + + struct ra_tex *ratex = talloc_ptrtype(NULL, ratex); + int ret = mppl_wrap_tex(mapper->ra, pltex, ratex); + if (!ret) { + pl_tex_destroy(gpu, &pltex); + talloc_free(ratex); + return -1; + } + mapper->tex[i] = ratex; + } + + return 0; +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + struct priv *p = mapper->priv; + + CVPixelBufferRelease(p->pbuf); + if (p->mtl_texture_cache) { + CFRelease(p->mtl_texture_cache); + p->mtl_texture_cache = NULL; + } +} + +bool vt_pl_init(const struct ra_hwdec *hw) +{ + struct priv_owner *p = hw->priv; + + if (!check_hwdec(hw)) + return false; + + p->interop_init = mapper_init; + p->interop_uninit = mapper_uninit; + p->interop_map = mapper_map; + p->interop_unmap = mapper_unmap; + + return true; +} diff --git a/video/out/hwdec/hwdec_vulkan.c b/video/out/hwdec/hwdec_vulkan.c new file mode 100644 index 0000000..5f7354d --- /dev/null +++ b/video/out/hwdec/hwdec_vulkan.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2022 Philip Langdale + * + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see . + */ + +#include "config.h" +#include "video/out/gpu/hwdec.h" +#include "video/out/vulkan/context.h" +#include "video/out/placebo/ra_pl.h" + +#include +#include + +struct vulkan_hw_priv { + struct mp_hwdec_ctx hwctx; + pl_gpu gpu; +}; + +struct vulkan_mapper_priv { + struct mp_image layout; + AVVkFrame *vkf; + pl_tex tex[4]; +}; + +static void lock_queue(struct AVHWDeviceContext *ctx, + uint32_t queue_family, uint32_t index) +{ + pl_vulkan vulkan = ctx->user_opaque; + vulkan->lock_queue(vulkan, queue_family, index); +} + +static void unlock_queue(struct AVHWDeviceContext *ctx, + uint32_t queue_family, uint32_t index) +{ + pl_vulkan vulkan = ctx->user_opaque; + vulkan->unlock_queue(vulkan, queue_family, index); +} + +static int vulkan_init(struct ra_hwdec *hw) +{ + AVBufferRef *hw_device_ctx = NULL; + int ret = 0; + struct vulkan_hw_priv *p = hw->priv; + int level = hw->probing ? MSGL_V : MSGL_ERR; + + struct mpvk_ctx *vk = ra_vk_ctx_get(hw->ra_ctx); + if (!vk) { + MP_MSG(hw, level, "This is not a libplacebo vulkan gpu api context.\n"); + return 0; + } + + p->gpu = ra_pl_get(hw->ra_ctx->ra); + if (!p->gpu) { + MP_MSG(hw, level, "Failed to obtain pl_gpu.\n"); + return 0; + } + + /* + * libplacebo initialises all queues, but we still need to discover which + * one is the decode queue. + */ + uint32_t num_qf = 0; + VkQueueFamilyProperties *qf = NULL; + vkGetPhysicalDeviceQueueFamilyProperties(vk->vulkan->phys_device, &num_qf, NULL); + if (!num_qf) + goto error; + + qf = talloc_array(NULL, VkQueueFamilyProperties, num_qf); + vkGetPhysicalDeviceQueueFamilyProperties(vk->vulkan->phys_device, &num_qf, qf); + + int decode_index = -1, decode_count = 0; + for (int i = 0; i < num_qf; i++) { + /* + * Pick the first discovered decode queue that we find. 
Maybe a day will + * come when this needs to be smarter, but I'm sure a bunch of other + * things will have to change too. + */ + if ((qf[i].queueFlags) & VK_QUEUE_VIDEO_DECODE_BIT_KHR) { + decode_index = i; + decode_count = qf[i].queueCount; + } + } + + hw_device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VULKAN); + if (!hw_device_ctx) + goto error; + + AVHWDeviceContext *device_ctx = (void *)hw_device_ctx->data; + AVVulkanDeviceContext *device_hwctx = device_ctx->hwctx; + + device_ctx->user_opaque = (void *)vk->vulkan; + device_hwctx->lock_queue = lock_queue; + device_hwctx->unlock_queue = unlock_queue; + device_hwctx->get_proc_addr = vk->vkinst->get_proc_addr; + device_hwctx->inst = vk->vkinst->instance; + device_hwctx->phys_dev = vk->vulkan->phys_device; + device_hwctx->act_dev = vk->vulkan->device; + device_hwctx->device_features = *vk->vulkan->features; + device_hwctx->enabled_inst_extensions = vk->vkinst->extensions; + device_hwctx->nb_enabled_inst_extensions = vk->vkinst->num_extensions; + device_hwctx->enabled_dev_extensions = vk->vulkan->extensions; + device_hwctx->nb_enabled_dev_extensions = vk->vulkan->num_extensions; + device_hwctx->queue_family_index = vk->vulkan->queue_graphics.index; + device_hwctx->nb_graphics_queues = vk->vulkan->queue_graphics.count; + device_hwctx->queue_family_tx_index = vk->vulkan->queue_transfer.index; + device_hwctx->nb_tx_queues = vk->vulkan->queue_transfer.count; + device_hwctx->queue_family_comp_index = vk->vulkan->queue_compute.index; + device_hwctx->nb_comp_queues = vk->vulkan->queue_compute.count; + device_hwctx->queue_family_decode_index = decode_index; + device_hwctx->nb_decode_queues = decode_count; + + ret = av_hwdevice_ctx_init(hw_device_ctx); + if (ret < 0) { + MP_MSG(hw, level, "av_hwdevice_ctx_init failed\n"); + goto error; + } + + p->hwctx = (struct mp_hwdec_ctx) { + .driver_name = hw->driver->name, + .av_device_ref = hw_device_ctx, + .hw_imgfmt = IMGFMT_VULKAN, + }; + hwdec_devices_add(hw->devs, &p->hwctx); 
+ + talloc_free(qf); + return 0; + + error: + talloc_free(qf); + av_buffer_unref(&hw_device_ctx); + return -1; +} + +static void vulkan_uninit(struct ra_hwdec *hw) +{ + struct vulkan_hw_priv *p = hw->priv; + + hwdec_devices_remove(hw->devs, &p->hwctx); + av_buffer_unref(&p->hwctx.av_device_ref); +} + +static int mapper_init(struct ra_hwdec_mapper *mapper) +{ + struct vulkan_mapper_priv *p = mapper->priv; + + mapper->dst_params = mapper->src_params; + mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt; + mapper->dst_params.hw_subfmt = 0; + + mp_image_set_params(&p->layout, &mapper->dst_params); + + struct ra_imgfmt_desc desc = {0}; + if (!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc)) + return -1; + + return 0; +} + +static void mapper_uninit(struct ra_hwdec_mapper *mapper) +{ + +} + +static void mapper_unmap(struct ra_hwdec_mapper *mapper) +{ + struct vulkan_hw_priv *p_owner = mapper->owner->priv; + struct vulkan_mapper_priv *p = mapper->priv; + if (!mapper->src) + goto end; + + AVHWFramesContext *hwfc = (AVHWFramesContext *) mapper->src->hwctx->data;; + const AVVulkanFramesContext *vkfc = hwfc->hwctx;; + AVVkFrame *vkf = p->vkf; + + int num_images; + for (num_images = 0; (vkf->img[num_images] != VK_NULL_HANDLE); num_images++); + + for (int i = 0; (p->tex[i] != NULL); i++) { + pl_tex *tex = &p->tex[i]; + if (!*tex) + continue; + + // If we have multiple planes and one image, then that is a multiplane + // frame. Anything else is treated as one-image-per-plane. + int index = p->layout.num_planes > 1 && num_images == 1 ? 
0 : i; + + // Update AVVkFrame state to reflect current layout + bool ok = pl_vulkan_hold_ex(p_owner->gpu, pl_vulkan_hold_params( + .tex = *tex, + .out_layout = &vkf->layout[index], + .qf = VK_QUEUE_FAMILY_IGNORED, + .semaphore = (pl_vulkan_sem) { + .sem = vkf->sem[index], + .value = vkf->sem_value[index] + 1, + }, + )); + + vkf->access[index] = 0; + vkf->sem_value[index] += !!ok; + *tex = NULL; + } + + vkfc->unlock_frame(hwfc, vkf); + + end: + for (int i = 0; i < p->layout.num_planes; i++) + ra_tex_free(mapper->ra, &mapper->tex[i]); + + p->vkf = NULL; +} + +static int mapper_map(struct ra_hwdec_mapper *mapper) +{ + bool result = false; + struct vulkan_hw_priv *p_owner = mapper->owner->priv; + struct vulkan_mapper_priv *p = mapper->priv; + pl_vulkan vk = pl_vulkan_get(p_owner->gpu); + if (!vk) + return -1; + + AVHWFramesContext *hwfc = (AVHWFramesContext *) mapper->src->hwctx->data; + const AVVulkanFramesContext *vkfc = hwfc->hwctx; + AVVkFrame *vkf = (AVVkFrame *) mapper->src->planes[0]; + + /* + * We need to use the dimensions from the HW Frames Context for the + * textures, as the underlying images may be larger than the logical frame + * size. This most often happens with 1080p content where the actual frame + * height is 1088. + */ + struct mp_image raw_layout; + mp_image_setfmt(&raw_layout, p->layout.params.imgfmt); + mp_image_set_size(&raw_layout, hwfc->width, hwfc->height); + + int num_images; + for (num_images = 0; (vkf->img[num_images] != VK_NULL_HANDLE); num_images++); + const VkFormat *vk_fmt = av_vkfmt_from_pixfmt(hwfc->sw_format); + + vkfc->lock_frame(hwfc, vkf); + + for (int i = 0; i < p->layout.num_planes; i++) { + pl_tex *tex = &p->tex[i]; + VkImageAspectFlags aspect = VK_IMAGE_ASPECT_COLOR_BIT; + int index = i; + + // If we have multiple planes and one image, then that is a multiplane + // frame. Anything else is treated as one-image-per-plane. 
+ if (p->layout.num_planes > 1 && num_images == 1) { + index = 0; + + switch (i) { + case 0: + aspect = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; + break; + case 1: + aspect = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; + break; + case 2: + aspect = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; + break; + default: + goto error; + } + } + + *tex = pl_vulkan_wrap(p_owner->gpu, pl_vulkan_wrap_params( + .image = vkf->img[index], + .width = mp_image_plane_w(&raw_layout, i), + .height = mp_image_plane_h(&raw_layout, i), + .format = vk_fmt[i], + .usage = vkfc->usage, + .aspect = aspect, + )); + if (!*tex) + goto error; + + pl_vulkan_release_ex(p_owner->gpu, pl_vulkan_release_params( + .tex = p->tex[i], + .layout = vkf->layout[index], + .qf = VK_QUEUE_FAMILY_IGNORED, + .semaphore = (pl_vulkan_sem) { + .sem = vkf->sem[index], + .value = vkf->sem_value[index], + }, + )); + + struct ra_tex *ratex = talloc_ptrtype(NULL, ratex); + result = mppl_wrap_tex(mapper->ra, *tex, ratex); + if (!result) { + pl_tex_destroy(p_owner->gpu, tex); + talloc_free(ratex); + goto error; + } + mapper->tex[i] = ratex; + } + + p->vkf = vkf; + return 0; + + error: + vkfc->unlock_frame(hwfc, vkf); + mapper_unmap(mapper); + return -1; +} + +const struct ra_hwdec_driver ra_hwdec_vulkan = { + .name = "vulkan", + .imgfmts = {IMGFMT_VULKAN, 0}, + .priv_size = sizeof(struct vulkan_hw_priv), + .init = vulkan_init, + .uninit = vulkan_uninit, + .mapper = &(const struct ra_hwdec_mapper_driver){ + .priv_size = sizeof(struct vulkan_mapper_priv), + .init = mapper_init, + .uninit = mapper_uninit, + .map = mapper_map, + .unmap = mapper_unmap, + }, +}; -- cgit v1.2.3