path: root/video/out/gpu
Diffstat
-rw-r--r--  video/out/gpu/context.c          |  277
-rw-r--r--  video/out/gpu/context.h          |  107
-rw-r--r--  video/out/gpu/d3d11_helpers.c    |  966
-rw-r--r--  video/out/gpu/d3d11_helpers.h    |  103
-rw-r--r--  video/out/gpu/error_diffusion.c  |  316
-rw-r--r--  video/out/gpu/error_diffusion.h  |   48
-rw-r--r--  video/out/gpu/hwdec.c            |  358
-rw-r--r--  video/out/gpu/hwdec.h            |  156
-rw-r--r--  video/out/gpu/lcms.c             |  526
-rw-r--r--  video/out/gpu/lcms.h             |   61
-rw-r--r--  video/out/gpu/libmpv_gpu.c       |  248
-rw-r--r--  video/out/gpu/libmpv_gpu.h       |   40
-rw-r--r--  video/out/gpu/osd.c              |  363
-rw-r--r--  video/out/gpu/osd.h              |   25
-rw-r--r--  video/out/gpu/ra.c               |  424
-rw-r--r--  video/out/gpu/ra.h               |  559
-rw-r--r--  video/out/gpu/shader_cache.c     | 1056
-rw-r--r--  video/out/gpu/shader_cache.h     |   66
-rw-r--r--  video/out/gpu/spirv.c            |   70
-rw-r--r--  video/out/gpu/spirv.h            |   41
-rw-r--r--  video/out/gpu/spirv_shaderc.c    |  125
-rw-r--r--  video/out/gpu/user_shaders.c     |  463
-rw-r--r--  video/out/gpu/user_shaders.h     |   99
-rw-r--r--  video/out/gpu/utils.c            |  349
-rw-r--r--  video/out/gpu/utils.h            |  108
-rw-r--r--  video/out/gpu/video.c            | 4364
-rw-r--r--  video/out/gpu/video.h            |  238
-rw-r--r--  video/out/gpu/video_shaders.c    | 1033
-rw-r--r--  video/out/gpu/video_shaders.h    |   59
-rw-r--r--  video/out/gpu_next/context.c     |  240
-rw-r--r--  video/out/gpu_next/context.h     |   40
31 files changed, 12928 insertions, 0 deletions
diff --git a/video/out/gpu/context.c b/video/out/gpu/context.c
new file mode 100644
index 0000000..5ce18af
--- /dev/null
+++ b/video/out/gpu/context.c
@@ -0,0 +1,277 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <math.h>
+#include <assert.h>
+
+#include "config.h"
+#include "common/common.h"
+#include "common/msg.h"
+#include "options/options.h"
+#include "options/m_option.h"
+#include "video/out/vo.h"
+
+#include "context.h"
+#include "spirv.h"
+
+/* OpenGL */
+extern const struct ra_ctx_fns ra_ctx_glx;
+extern const struct ra_ctx_fns ra_ctx_x11_egl;
+extern const struct ra_ctx_fns ra_ctx_drm_egl;
+extern const struct ra_ctx_fns ra_ctx_wayland_egl;
+extern const struct ra_ctx_fns ra_ctx_wgl;
+extern const struct ra_ctx_fns ra_ctx_angle;
+extern const struct ra_ctx_fns ra_ctx_dxgl;
+extern const struct ra_ctx_fns ra_ctx_rpi;
+extern const struct ra_ctx_fns ra_ctx_android;
+
+/* Vulkan */
+extern const struct ra_ctx_fns ra_ctx_vulkan_wayland;
+extern const struct ra_ctx_fns ra_ctx_vulkan_win;
+extern const struct ra_ctx_fns ra_ctx_vulkan_xlib;
+extern const struct ra_ctx_fns ra_ctx_vulkan_android;
+extern const struct ra_ctx_fns ra_ctx_vulkan_display;
+extern const struct ra_ctx_fns ra_ctx_vulkan_mac;
+
+/* Direct3D 11 */
+extern const struct ra_ctx_fns ra_ctx_d3d11;
+
+/* No API */
+extern const struct ra_ctx_fns ra_ctx_wldmabuf;
+
+static const struct ra_ctx_fns *contexts[] = {
+#if HAVE_D3D11
+ &ra_ctx_d3d11,
+#endif
+
+// OpenGL contexts:
+#if HAVE_EGL_ANDROID
+ &ra_ctx_android,
+#endif
+#if HAVE_RPI
+ &ra_ctx_rpi,
+#endif
+#if HAVE_EGL_ANGLE_WIN32
+ &ra_ctx_angle,
+#endif
+#if HAVE_GL_WIN32
+ &ra_ctx_wgl,
+#endif
+#if HAVE_GL_DXINTEROP
+ &ra_ctx_dxgl,
+#endif
+#if HAVE_EGL_WAYLAND
+ &ra_ctx_wayland_egl,
+#endif
+#if HAVE_EGL_X11
+ &ra_ctx_x11_egl,
+#endif
+#if HAVE_GL_X11
+ &ra_ctx_glx,
+#endif
+#if HAVE_EGL_DRM
+ &ra_ctx_drm_egl,
+#endif
+
+// Vulkan contexts:
+#if HAVE_VULKAN
+
+#if HAVE_ANDROID
+ &ra_ctx_vulkan_android,
+#endif
+#if HAVE_WIN32_DESKTOP
+ &ra_ctx_vulkan_win,
+#endif
+#if HAVE_WAYLAND
+ &ra_ctx_vulkan_wayland,
+#endif
+#if HAVE_X11
+ &ra_ctx_vulkan_xlib,
+#endif
+#if HAVE_VK_KHR_DISPLAY
+ &ra_ctx_vulkan_display,
+#endif
+#if HAVE_COCOA && HAVE_SWIFT
+ &ra_ctx_vulkan_mac,
+#endif
+#endif
+
+/* No API contexts: */
+#if HAVE_DMABUF_WAYLAND
+ &ra_ctx_wldmabuf,
+#endif
+};
+
+static int ra_ctx_api_help(struct mp_log *log, const struct m_option *opt,
+ struct bstr name)
+{
+ mp_info(log, "GPU APIs (contexts):\n");
+ mp_info(log, " auto (autodetect)\n");
+ for (int n = 0; n < MP_ARRAY_SIZE(contexts); n++) {
+ if (!contexts[n]->hidden)
+ mp_info(log, " %s (%s)\n", contexts[n]->type, contexts[n]->name);
+ }
+ return M_OPT_EXIT;
+}
+
+static int ra_ctx_validate_api(struct mp_log *log, const struct m_option *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ if (bstr_equals0(param, "auto"))
+ return 1;
+ for (int i = 0; i < MP_ARRAY_SIZE(contexts); i++) {
+ if (bstr_equals0(param, contexts[i]->type) && !contexts[i]->hidden)
+ return 1;
+ }
+ return M_OPT_INVALID;
+}
+
+static int ra_ctx_context_help(struct mp_log *log, const struct m_option *opt,
+ struct bstr name)
+{
+ mp_info(log, "GPU contexts (APIs):\n");
+ mp_info(log, " auto (autodetect)\n");
+ for (int n = 0; n < MP_ARRAY_SIZE(contexts); n++) {
+ if (!contexts[n]->hidden)
+ mp_info(log, " %s (%s)\n", contexts[n]->name, contexts[n]->type);
+ }
+ return M_OPT_EXIT;
+}
+
+static int ra_ctx_validate_context(struct mp_log *log, const struct m_option *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ if (bstr_equals0(param, "auto"))
+ return 1;
+ for (int i = 0; i < MP_ARRAY_SIZE(contexts); i++) {
+ if (bstr_equals0(param, contexts[i]->name) && !contexts[i]->hidden)
+ return 1;
+ }
+ return M_OPT_INVALID;
+}
+
+// Create a VO window and create a RA context on it.
+// opts: passed on to the selected context backend (see struct ra_ctx_opts)
+struct ra_ctx *ra_ctx_create(struct vo *vo, struct ra_ctx_opts opts)
+{
+ bool api_auto = !opts.context_type || strcmp(opts.context_type, "auto") == 0;
+ bool ctx_auto = !opts.context_name || strcmp(opts.context_name, "auto") == 0;
+
+ if (ctx_auto) {
+ MP_VERBOSE(vo, "Probing for best GPU context.\n");
+ opts.probing = true;
+ }
+
+ // Hack to silence backend (X11/Wayland/etc.) errors. Kill it once backends
+ // are separate from `struct vo`
+ bool old_probing = vo->probing;
+ vo->probing = opts.probing;
+
+ for (int i = 0; i < MP_ARRAY_SIZE(contexts); i++) {
+ if (contexts[i]->hidden)
+ continue;
+ if (!opts.probing && strcmp(contexts[i]->name, opts.context_name) != 0)
+ continue;
+ if (!api_auto && strcmp(contexts[i]->type, opts.context_type) != 0)
+ continue;
+
+ struct ra_ctx *ctx = talloc_ptrtype(NULL, ctx);
+ *ctx = (struct ra_ctx) {
+ .vo = vo,
+ .global = vo->global,
+ .log = mp_log_new(ctx, vo->log, contexts[i]->type),
+ .opts = opts,
+ .fns = contexts[i],
+ };
+
+ MP_VERBOSE(ctx, "Initializing GPU context '%s'\n", ctx->fns->name);
+ if (contexts[i]->init(ctx)) {
+ vo->probing = old_probing;
+ return ctx;
+ }
+
+ talloc_free(ctx);
+ }
+
+ vo->probing = old_probing;
+
+ // If we've reached this point, then none of the contexts matched the name
+ // requested, or the backend creation failed for all of them.
+ if (!vo->probing)
+ MP_ERR(vo, "Failed initializing any suitable GPU context!\n");
+ return NULL;
+}
+
+struct ra_ctx *ra_ctx_create_by_name(struct vo *vo, const char *name)
+{
+ for (int i = 0; i < MP_ARRAY_SIZE(contexts); i++) {
+ if (strcmp(name, contexts[i]->name) != 0)
+ continue;
+
+ struct ra_ctx *ctx = talloc_ptrtype(NULL, ctx);
+ *ctx = (struct ra_ctx) {
+ .vo = vo,
+ .global = vo->global,
+ .log = mp_log_new(ctx, vo->log, contexts[i]->type),
+ .fns = contexts[i],
+ };
+
+ MP_VERBOSE(ctx, "Initializing GPU context '%s'\n", ctx->fns->name);
+ if (contexts[i]->init(ctx))
+ return ctx;
+ talloc_free(ctx);
+ }
+ return NULL;
+}
+
+void ra_ctx_destroy(struct ra_ctx **ctx_ptr)
+{
+ struct ra_ctx *ctx = *ctx_ptr;
+ if (!ctx)
+ return;
+
+ if (ctx->spirv && ctx->spirv->fns->uninit)
+ ctx->spirv->fns->uninit(ctx);
+
+ ctx->fns->uninit(ctx);
+ talloc_free(ctx);
+
+ *ctx_ptr = NULL;
+}
+
+#define OPT_BASE_STRUCT struct ra_ctx_opts
+const struct m_sub_options ra_ctx_conf = {
+ .opts = (const m_option_t[]) {
+ {"gpu-context",
+ OPT_STRING_VALIDATE(context_name, ra_ctx_validate_context),
+ .help = ra_ctx_context_help},
+ {"gpu-api",
+ OPT_STRING_VALIDATE(context_type, ra_ctx_validate_api),
+ .help = ra_ctx_api_help},
+ {"gpu-debug", OPT_BOOL(debug)},
+ {"gpu-sw", OPT_BOOL(allow_sw)},
+ {0}
+ },
+ .size = sizeof(struct ra_ctx_opts),
+};
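
For orientation, a minimal sketch (not part of this patch) of how a VO might consume the option group and the create/destroy API above; mp_get_config_group() and the surrounding preinit structure are assumptions made for illustration:

    // Sketch only: obtain the parsed ra_ctx_opts and create/destroy a context.
    static int example_preinit(struct vo *vo)
    {
        struct ra_ctx_opts *opts =
            mp_get_config_group(vo, vo->global, &ra_ctx_conf); // assumed accessor
        struct ra_ctx *ctx = ra_ctx_create(vo, *opts); // probes when set to "auto"
        if (!ctx)
            return -1;
        // ... use ctx->ra / ctx->swapchain ...
        ra_ctx_destroy(&ctx); // also tears down the underlying struct ra
        return 0;
    }
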
diff --git a/video/out/gpu/context.h b/video/out/gpu/context.h
new file mode 100644
index 0000000..6788e6f
--- /dev/null
+++ b/video/out/gpu/context.h
@@ -0,0 +1,107 @@
+#pragma once
+
+#include "video/out/vo.h"
+#include "video/csputils.h"
+
+#include "ra.h"
+
+struct ra_ctx_opts {
+ bool allow_sw; // allow software renderers
+ bool want_alpha; // create an alpha framebuffer if possible
+ bool debug; // enable debugging layers/callbacks etc.
+ bool probing; // the backend was auto-probed
+ char *context_name; // filter by `ra_ctx_fns.name`
+ char *context_type; // filter by `ra_ctx_fns.type`
+};
+
+extern const struct m_sub_options ra_ctx_conf;
+
+struct ra_ctx {
+ struct vo *vo;
+ struct ra *ra;
+ struct mpv_global *global;
+ struct mp_log *log;
+
+ struct ra_ctx_opts opts;
+ const struct ra_ctx_fns *fns;
+ struct ra_swapchain *swapchain;
+ struct spirv_compiler *spirv;
+
+ void *priv;
+};
+
+// The functions that make up a ra_ctx.
+struct ra_ctx_fns {
+ const char *type; // API type (for --gpu-api)
+ const char *name; // name (for --gpu-context)
+
+ bool hidden; // hide the ra_ctx from users
+
+ // Resize the window, or create a new window if there isn't one yet.
+ // Currently, there is an unfortunate interaction with ctx->vo, and
+ // display size etc. are determined by it.
+ bool (*reconfig)(struct ra_ctx *ctx);
+
+ // This behaves exactly like vo_driver.control().
+ int (*control)(struct ra_ctx *ctx, int *events, int request, void *arg);
+
+ // These behave exactly like vo_driver.wakeup/wait_events. They are
+ // optional.
+ void (*wakeup)(struct ra_ctx *ctx);
+ void (*wait_events)(struct ra_ctx *ctx, int64_t until_time_ns);
+ void (*update_render_opts)(struct ra_ctx *ctx);
+
+ // Initialize/destroy the 'struct ra' and possibly the underlying VO backend.
+ // Not normally called by the user of the ra_ctx.
+ bool (*init)(struct ra_ctx *ctx);
+ void (*uninit)(struct ra_ctx *ctx);
+};
+
+// Extra struct for the swapchain-related functions so they can be easily
+// inherited from helpers.
+struct ra_swapchain {
+ struct ra_ctx *ctx;
+ struct priv *priv;
+ const struct ra_swapchain_fns *fns;
+};
+
+// Represents a framebuffer / render target
+struct ra_fbo {
+ struct ra_tex *tex;
+ bool flip; // rendering needs to be inverted
+
+ // Host system's colorspace that it will be interpreting
+ // the frame buffer as.
+ struct mp_colorspace color_space;
+};
+
+struct ra_swapchain_fns {
+ // Gets the current framebuffer depth in bits (0 if unknown). Optional.
+ int (*color_depth)(struct ra_swapchain *sw);
+
+ // Called when rendering starts. Returns false on failure. This must be
+ // followed by submit_frame, to submit the rendered frame. This function
+ // can also fail sporadically, and such errors should be ignored unless
+ // they persist.
+ bool (*start_frame)(struct ra_swapchain *sw, struct ra_fbo *out_fbo);
+
+ // Present the frame. Issued in lockstep with start_frame, with rendering
+ // commands in between. The `frame` is just there for timing data, for
+ // swapchains smart enough to do something with it.
+ bool (*submit_frame)(struct ra_swapchain *sw, const struct vo_frame *frame);
+
+ // Performs a buffer swap. This blocks for as long as necessary to meet
+ // params.swapchain_depth, or until the next vblank (for vsynced contexts)
+ void (*swap_buffers)(struct ra_swapchain *sw);
+
+ // See vo. Usually called after swap_buffers().
+ void (*get_vsync)(struct ra_swapchain *sw, struct vo_vsync_info *info);
+};
+
+// Create and destroy a ra_ctx. This also takes care of creating and destroying
+// the underlying `struct ra`, and perhaps the underlying VO backend.
+struct ra_ctx *ra_ctx_create(struct vo *vo, struct ra_ctx_opts opts);
+void ra_ctx_destroy(struct ra_ctx **ctx);
+
+// Special case of creating a ra_ctx while specifying a specific context by name.
+struct ra_ctx *ra_ctx_create_by_name(struct vo *vo, const char *name);
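
To make the swapchain call sequence above concrete, here is a minimal sketch of one frame as a caller might drive it; render_frame_to() and the incoming vo_frame are hypothetical, and error handling is reduced to skipping the frame:

    // Sketch only: one frame through a ra_swapchain (not code from this patch).
    static void draw_frame_example(struct ra_swapchain *sw, struct vo_frame *frame)
    {
        struct ra_fbo fbo;
        if (!sw->fns->start_frame(sw, &fbo))
            return; // sporadic failures are expected; just skip this frame

        render_frame_to(fbo.tex, fbo.flip, &fbo.color_space); // hypothetical renderer

        sw->fns->submit_frame(sw, frame); // in lockstep with start_frame
        sw->fns->swap_buffers(sw);        // may block to honor the swapchain depth

        struct vo_vsync_info vsync = {0};
        if (sw->fns->get_vsync)           // guard in case a backend omits it
            sw->fns->get_vsync(sw, &vsync);
    }
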
diff --git a/video/out/gpu/d3d11_helpers.c b/video/out/gpu/d3d11_helpers.c
new file mode 100644
index 0000000..30d9eae
--- /dev/null
+++ b/video/out/gpu/d3d11_helpers.c
@@ -0,0 +1,966 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <windows.h>
+#include <d3d11.h>
+#include <dxgi1_6.h>
+#include <versionhelpers.h>
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "misc/bstr.h"
+#include "osdep/io.h"
+#include "osdep/threads.h"
+#include "osdep/windows_utils.h"
+
+#include "d3d11_helpers.h"
+
+// Windows 8 enum value, not present in mingw-w64 headers
+#define DXGI_ADAPTER_FLAG_SOFTWARE (2)
+typedef HRESULT(WINAPI *PFN_CREATE_DXGI_FACTORY)(REFIID riid, void **ppFactory);
+
+static mp_once d3d11_once = MP_STATIC_ONCE_INITIALIZER;
+static PFN_D3D11_CREATE_DEVICE pD3D11CreateDevice = NULL;
+static PFN_CREATE_DXGI_FACTORY pCreateDXGIFactory1 = NULL;
+static void d3d11_load(void)
+{
+ HMODULE d3d11 = LoadLibraryW(L"d3d11.dll");
+ HMODULE dxgilib = LoadLibraryW(L"dxgi.dll");
+ if (!d3d11 || !dxgilib)
+ return;
+
+ pD3D11CreateDevice = (PFN_D3D11_CREATE_DEVICE)
+ GetProcAddress(d3d11, "D3D11CreateDevice");
+ pCreateDXGIFactory1 = (PFN_CREATE_DXGI_FACTORY)
+ GetProcAddress(dxgilib, "CreateDXGIFactory1");
+}
+
+static bool load_d3d11_functions(struct mp_log *log)
+{
+ mp_exec_once(&d3d11_once, d3d11_load);
+ if (!pD3D11CreateDevice || !pCreateDXGIFactory1) {
+ mp_fatal(log, "Failed to load base d3d11 functionality: "
+ "CreateDevice: %s, CreateDXGIFactory1: %s\n",
+ pD3D11CreateDevice ? "success" : "failure",
+ pCreateDXGIFactory1 ? "success": "failure");
+ return false;
+ }
+
+ return true;
+}
+
+#define D3D11_DXGI_ENUM(prefix, define) { case prefix ## define: return #define; }
+
+static const char *d3d11_get_format_name(DXGI_FORMAT fmt)
+{
+ switch (fmt) {
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, UNKNOWN);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32A32_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32A32_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32A32_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32A32_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32B32_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16B16A16_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G32_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32G8X24_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, D32_FLOAT_S8X24_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32_FLOAT_X8X24_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, X32_TYPELESS_G8X24_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R10G10B10A2_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R10G10B10A2_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R10G10B10A2_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R11G11B10_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8B8A8_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16G16_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, D32_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R32_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R24G8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, D24_UNORM_S8_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R24_UNORM_X8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, X24_TYPELESS_G8_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_FLOAT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, D16_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R16_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8_UINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8_SINT);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, A8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R1_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R9G9B9E5_SHAREDEXP);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R8G8_B8G8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, G8R8_G8B8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC1_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC1_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC1_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC2_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC2_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC2_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC3_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC3_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC3_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC4_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC4_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC4_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC5_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC5_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC5_SNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B5G6R5_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B5G5R5A1_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8A8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8X8_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, R10G10B10_XR_BIAS_A2_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8A8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8A8_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8X8_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B8G8R8X8_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC6H_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC6H_UF16);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC6H_SF16);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC7_TYPELESS);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC7_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, BC7_UNORM_SRGB);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, AYUV);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, Y410);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, Y416);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, NV12);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, P010);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, P016);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, 420_OPAQUE);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, YUY2);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, Y210);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, Y216);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, NV11);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, AI44);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, IA44);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, P8);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, A8P8);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, B4G4R4A4_UNORM);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, P208);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, V208);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, V408);
+ D3D11_DXGI_ENUM(DXGI_FORMAT_, FORCE_UINT);
+ default:
+ return "<Unknown>";
+ }
+}
+
+static const char *d3d11_get_csp_name(DXGI_COLOR_SPACE_TYPE csp)
+{
+ switch (csp) {
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_FULL_G22_NONE_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_FULL_G10_NONE_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_STUDIO_G22_NONE_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_STUDIO_G22_NONE_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RESERVED);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_FULL_G22_NONE_P709_X601);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G22_LEFT_P601);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_FULL_G22_LEFT_P601);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G22_LEFT_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_FULL_G22_LEFT_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G22_LEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_FULL_G22_LEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_FULL_G2084_NONE_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G2084_LEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_STUDIO_G2084_NONE_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G22_TOPLEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G2084_TOPLEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_FULL_G22_NONE_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_GHLG_TOPLEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_FULL_GHLG_TOPLEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_STUDIO_G24_NONE_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, RGB_STUDIO_G24_NONE_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G24_LEFT_P709);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G24_LEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, YCBCR_STUDIO_G24_TOPLEFT_P2020);
+ D3D11_DXGI_ENUM(DXGI_COLOR_SPACE_, CUSTOM);
+ default:
+ return "<Unknown>";
+ }
+}
+
+static bool d3d11_get_mp_csp(DXGI_COLOR_SPACE_TYPE csp,
+ struct mp_colorspace *mp_csp)
+{
+ if (!mp_csp)
+ return false;
+
+ // Colorspaces utilizing gamma 2.2 (G22) are set to
+ // AUTO as that keeps the current default flow regarding
+ // SDR transfer function handling.
+ // (no adjustment is done unless the user has a CMS LUT).
+ //
+ // Additionally, only set primary information with colorspaces
+ // utilizing non-709 primaries to keep the current behavior
+ // regarding not doing conversion from BT.601 to BT.709.
+ switch (csp) {
+ case DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709:
+ *mp_csp = (struct mp_colorspace){
+ .gamma = MP_CSP_TRC_AUTO,
+ .primaries = MP_CSP_PRIM_AUTO,
+ };
+ break;
+ case DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709:
+ *mp_csp = (struct mp_colorspace) {
+ .gamma = MP_CSP_TRC_LINEAR,
+ .primaries = MP_CSP_PRIM_AUTO,
+ };
+ break;
+ case DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020:
+ *mp_csp = (struct mp_colorspace) {
+ .gamma = MP_CSP_TRC_PQ,
+ .primaries = MP_CSP_PRIM_BT_2020,
+ };
+ break;
+ case DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P2020:
+ *mp_csp = (struct mp_colorspace) {
+ .gamma = MP_CSP_TRC_AUTO,
+ .primaries = MP_CSP_PRIM_BT_2020,
+ };
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool query_output_format_and_colorspace(struct mp_log *log,
+ IDXGISwapChain *swapchain,
+ DXGI_FORMAT *out_fmt,
+ DXGI_COLOR_SPACE_TYPE *out_cspace)
+{
+ IDXGIOutput *output = NULL;
+ IDXGIOutput6 *output6 = NULL;
+ DXGI_OUTPUT_DESC1 desc = { 0 };
+ char *monitor_name = NULL;
+ bool success = false;
+
+ if (!out_fmt || !out_cspace)
+ return false;
+
+ HRESULT hr = IDXGISwapChain_GetContainingOutput(swapchain, &output);
+ if (FAILED(hr)) {
+ mp_err(log, "Failed to get swap chain's containing output: %s!\n",
+ mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ hr = IDXGIOutput_QueryInterface(output, &IID_IDXGIOutput6,
+ (void**)&output6);
+ if (FAILED(hr)) {
+ // This is the point where systems older than Windows 10 fail,
+ // so only use the error log level on Windows 10+.
+ mp_msg(log, IsWindows10OrGreater() ? MSGL_ERR : MSGL_V,
+ "Failed to create a DXGI 1.6 output interface: %s\n",
+ mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ hr = IDXGIOutput6_GetDesc1(output6, &desc);
+ if (FAILED(hr)) {
+ mp_err(log, "Failed to query swap chain's output information: %s\n",
+ mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ monitor_name = mp_to_utf8(NULL, desc.DeviceName);
+
+ mp_verbose(log, "Queried output: %s, %ldx%ld @ %d bits, colorspace: %s (%d)\n",
+ monitor_name,
+ desc.DesktopCoordinates.right - desc.DesktopCoordinates.left,
+ desc.DesktopCoordinates.bottom - desc.DesktopCoordinates.top,
+ desc.BitsPerColor,
+ d3d11_get_csp_name(desc.ColorSpace),
+ desc.ColorSpace);
+
+ *out_cspace = desc.ColorSpace;
+
+ // limit ourselves to the 8bit and 10bit formats for now.
+ // while the 16bit float format would be preferable as something
+ // to default to, it seems to be hard-coded to linear transfer
+ // in windowed mode, and follows configured colorspace in full screen.
+ *out_fmt = desc.BitsPerColor > 8 ?
+ DXGI_FORMAT_R10G10B10A2_UNORM : DXGI_FORMAT_R8G8B8A8_UNORM;
+
+ success = true;
+
+done:
+ talloc_free(monitor_name);
+ SAFE_RELEASE(output6);
+ SAFE_RELEASE(output);
+ return success;
+}
+
+// Get a const array of D3D_FEATURE_LEVELs from max_fl to min_fl (inclusive)
+static int get_feature_levels(int max_fl, int min_fl,
+ const D3D_FEATURE_LEVEL **out)
+{
+ static const D3D_FEATURE_LEVEL levels[] = {
+ D3D_FEATURE_LEVEL_12_1,
+ D3D_FEATURE_LEVEL_12_0,
+ D3D_FEATURE_LEVEL_11_1,
+ D3D_FEATURE_LEVEL_11_0,
+ D3D_FEATURE_LEVEL_10_1,
+ D3D_FEATURE_LEVEL_10_0,
+ D3D_FEATURE_LEVEL_9_3,
+ D3D_FEATURE_LEVEL_9_2,
+ D3D_FEATURE_LEVEL_9_1,
+ };
+ static const int levels_len = MP_ARRAY_SIZE(levels);
+
+ int start = 0;
+ for (; start < levels_len; start++) {
+ if (levels[start] <= max_fl)
+ break;
+ }
+ int len = 0;
+ for (; start + len < levels_len; len++) {
+ if (levels[start + len] < min_fl)
+ break;
+ }
+ *out = &levels[start];
+ return len;
+}
+
+static IDXGIAdapter1 *get_d3d11_adapter(struct mp_log *log,
+ struct bstr requested_adapter_name,
+ struct bstr *listing)
+{
+ HRESULT hr = S_OK;
+ IDXGIFactory1 *factory;
+ IDXGIAdapter1 *picked_adapter = NULL;
+
+ hr = pCreateDXGIFactory1(&IID_IDXGIFactory1, (void **)&factory);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to create a DXGI factory: %s\n",
+ mp_HRESULT_to_str(hr));
+ return NULL;
+ }
+
+ for (unsigned int adapter_num = 0; hr != DXGI_ERROR_NOT_FOUND; adapter_num++)
+ {
+ IDXGIAdapter1 *adapter = NULL;
+ DXGI_ADAPTER_DESC1 desc = { 0 };
+ char *adapter_description = NULL;
+
+ hr = IDXGIFactory1_EnumAdapters1(factory, adapter_num, &adapter);
+ if (FAILED(hr)) {
+ if (hr != DXGI_ERROR_NOT_FOUND) {
+ mp_fatal(log, "Failed to enumerate at adapter %u\n",
+ adapter_num);
+ }
+ continue;
+ }
+
+ if (FAILED(IDXGIAdapter1_GetDesc1(adapter, &desc))) {
+ mp_fatal(log, "Failed to get adapter description when listing at adapter %u\n",
+ adapter_num);
+ continue;
+ }
+
+ adapter_description = mp_to_utf8(NULL, desc.Description);
+
+ if (listing) {
+ bstr_xappend_asprintf(NULL, listing,
+ "Adapter %u: vendor: %u, description: %s\n",
+ adapter_num, desc.VendorId,
+ adapter_description);
+ }
+
+ if (requested_adapter_name.len &&
+ bstr_case_startswith(bstr0(adapter_description),
+ requested_adapter_name))
+ {
+ picked_adapter = adapter;
+ }
+
+ talloc_free(adapter_description);
+
+ if (picked_adapter) {
+ break;
+ }
+
+ SAFE_RELEASE(adapter);
+ }
+
+ SAFE_RELEASE(factory);
+
+ return picked_adapter;
+}
+
+static HRESULT create_device(struct mp_log *log, IDXGIAdapter1 *adapter,
+ bool warp, bool debug, int max_fl, int min_fl,
+ ID3D11Device **dev)
+{
+ const D3D_FEATURE_LEVEL *levels;
+ int levels_len = get_feature_levels(max_fl, min_fl, &levels);
+ if (!levels_len) {
+ mp_fatal(log, "No suitable Direct3D feature level found\n");
+ return E_FAIL;
+ }
+
+ D3D_DRIVER_TYPE type = warp ? D3D_DRIVER_TYPE_WARP
+ : D3D_DRIVER_TYPE_HARDWARE;
+ UINT flags = debug ? D3D11_CREATE_DEVICE_DEBUG : 0;
+ return pD3D11CreateDevice((IDXGIAdapter *)adapter, adapter ? D3D_DRIVER_TYPE_UNKNOWN : type,
+ NULL, flags, levels, levels_len, D3D11_SDK_VERSION, dev, NULL, NULL);
+}
+
+bool mp_d3d11_list_or_verify_adapters(struct mp_log *log,
+ bstr adapter_name,
+ bstr *listing)
+{
+ IDXGIAdapter1 *picked_adapter = NULL;
+
+ if (!load_d3d11_functions(log)) {
+ return false;
+ }
+
+ if ((picked_adapter = get_d3d11_adapter(log, adapter_name, listing))) {
+ SAFE_RELEASE(picked_adapter);
+ return true;
+ }
+
+ return false;
+}
+
+// Create a Direct3D 11 device for rendering and presentation. This is meant to
+// reduce boilerplate in backends that use D3D11, while also making sure they share
+// the same device creation logic and log the same information.
+bool mp_d3d11_create_present_device(struct mp_log *log,
+ struct d3d11_device_opts *opts,
+ ID3D11Device **dev_out)
+{
+ bool debug = opts->debug;
+ bool warp = opts->force_warp;
+ int max_fl = opts->max_feature_level;
+ int min_fl = opts->min_feature_level;
+ // Normalize nullptr and an empty string to nullptr to simplify handling.
+ char *adapter_name = (opts->adapter_name && *(opts->adapter_name)) ?
+ opts->adapter_name : NULL;
+ ID3D11Device *dev = NULL;
+ IDXGIDevice1 *dxgi_dev = NULL;
+ IDXGIAdapter1 *adapter = NULL;
+ bool success = false;
+ HRESULT hr;
+
+ if (!load_d3d11_functions(log)) {
+ goto done;
+ }
+
+ adapter = get_d3d11_adapter(log, bstr0(adapter_name), NULL);
+
+ if (adapter_name && !adapter) {
+ mp_warn(log, "Adapter matching '%s' was not found in the system! "
+ "Will fall back to the default adapter.\n",
+ adapter_name);
+ }
+
+ // Return here to retry creating the device
+ do {
+ // Use these default feature levels if they are not set
+ max_fl = max_fl ? max_fl : D3D_FEATURE_LEVEL_11_0;
+ min_fl = min_fl ? min_fl : D3D_FEATURE_LEVEL_9_1;
+
+ hr = create_device(log, adapter, warp, debug, max_fl, min_fl, &dev);
+
+ // Retry without debug, if SDK is not available
+ if (debug && hr == DXGI_ERROR_SDK_COMPONENT_MISSING) {
+ mp_warn(log, "gpu-debug disabled due to error: %s\n", mp_HRESULT_to_str(hr));
+ debug = false;
+ continue;
+ }
+
+ if (SUCCEEDED(hr))
+ break;
+
+ // Trying to create a D3D_FEATURE_LEVEL_12_0 device on Windows 8.1 or
+ // below will not succeed. Try an 11_1 device.
+ if (max_fl >= D3D_FEATURE_LEVEL_12_0 &&
+ min_fl <= D3D_FEATURE_LEVEL_11_1)
+ {
+ mp_dbg(log, "Failed to create 12_0+ device, trying 11_1\n");
+ max_fl = D3D_FEATURE_LEVEL_11_1;
+ continue;
+ }
+
+ // Trying to create a D3D_FEATURE_LEVEL_11_1 device on Windows 7
+ // without the platform update will not succeed. Try an 11_0 device.
+ if (max_fl >= D3D_FEATURE_LEVEL_11_1 &&
+ min_fl <= D3D_FEATURE_LEVEL_11_0)
+ {
+ mp_dbg(log, "Failed to create 11_1+ device, trying 11_0\n");
+ max_fl = D3D_FEATURE_LEVEL_11_0;
+ continue;
+ }
+
+ // Retry with WARP if allowed
+ if (!warp && opts->allow_warp) {
+ mp_dbg(log, "Failed to create hardware device, trying WARP\n");
+ warp = true;
+ max_fl = opts->max_feature_level;
+ min_fl = opts->min_feature_level;
+ continue;
+ }
+
+ mp_fatal(log, "Failed to create Direct3D 11 device: %s\n",
+ mp_HRESULT_to_str(hr));
+ goto done;
+ } while (true);
+
+ // if we picked an adapter, release it here - we're taking another
+ // from the device.
+ SAFE_RELEASE(adapter);
+
+ hr = ID3D11Device_QueryInterface(dev, &IID_IDXGIDevice1, (void**)&dxgi_dev);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get DXGI device\n");
+ goto done;
+ }
+ hr = IDXGIDevice1_GetParent(dxgi_dev, &IID_IDXGIAdapter1, (void**)&adapter);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get DXGI adapter\n");
+ goto done;
+ }
+
+ IDXGIDevice1_SetMaximumFrameLatency(dxgi_dev, opts->max_frame_latency);
+
+ DXGI_ADAPTER_DESC1 desc;
+ hr = IDXGIAdapter1_GetDesc1(adapter, &desc);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get adapter description\n");
+ goto done;
+ }
+
+ D3D_FEATURE_LEVEL selected_level = ID3D11Device_GetFeatureLevel(dev);
+ mp_verbose(log, "Using Direct3D 11 feature level %u_%u\n",
+ ((unsigned)selected_level) >> 12,
+ (((unsigned)selected_level) >> 8) & 0xf);
+
+ char *dev_name = mp_to_utf8(NULL, desc.Description);
+ mp_verbose(log, "Device Name: %s\n"
+ "Device ID: %04x:%04x (rev %02x)\n"
+ "Subsystem ID: %04x:%04x\n"
+ "LUID: %08lx%08lx\n",
+ dev_name,
+ desc.VendorId, desc.DeviceId, desc.Revision,
+ LOWORD(desc.SubSysId), HIWORD(desc.SubSysId),
+ desc.AdapterLuid.HighPart, desc.AdapterLuid.LowPart);
+ talloc_free(dev_name);
+
+ if (desc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE)
+ warp = true;
+ // If the primary display adapter is a software adapter, the
+ // DXGI_ADAPTER_FLAG_SOFTWARE flag won't be set, but the device IDs should
+ // still match the Microsoft Basic Render Driver
+ if (desc.VendorId == 0x1414 && desc.DeviceId == 0x8c)
+ warp = true;
+ if (warp) {
+ mp_msg(log, opts->force_warp ? MSGL_V : MSGL_WARN,
+ "Using a software adapter\n");
+ }
+
+ *dev_out = dev;
+ dev = NULL;
+ success = true;
+
+done:
+ SAFE_RELEASE(adapter);
+ SAFE_RELEASE(dxgi_dev);
+ SAFE_RELEASE(dev);
+ return success;
+}
+
+static HRESULT create_swapchain_1_2(ID3D11Device *dev, IDXGIFactory2 *factory,
+ struct mp_log *log,
+ struct d3d11_swapchain_opts *opts,
+ bool flip, DXGI_FORMAT format,
+ IDXGISwapChain **swapchain_out)
+{
+ IDXGISwapChain *swapchain = NULL;
+ IDXGISwapChain1 *swapchain1 = NULL;
+ HRESULT hr;
+
+ DXGI_SWAP_CHAIN_DESC1 desc = {
+ .Width = opts->width ? opts->width : 1,
+ .Height = opts->height ? opts->height : 1,
+ .Format = format,
+ .SampleDesc = { .Count = 1 },
+ .BufferUsage = opts->usage,
+ };
+
+ if (flip) {
+ // UNORDERED_ACCESS with FLIP_SEQUENTIAL seems to be buggy with
+ // Windows 7 drivers
+ if ((desc.BufferUsage & DXGI_USAGE_UNORDERED_ACCESS) &&
+ !IsWindows8OrGreater())
+ {
+ mp_verbose(log, "Disabling UNORDERED_ACCESS for flip-model "
+ "swapchain backbuffers in Windows 7\n");
+ desc.BufferUsage &= ~DXGI_USAGE_UNORDERED_ACCESS;
+ }
+
+ if (IsWindows10OrGreater()) {
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ } else {
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
+ }
+ desc.BufferCount = opts->length;
+ } else {
+ desc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
+ desc.BufferCount = 1;
+ }
+
+ hr = IDXGIFactory2_CreateSwapChainForHwnd(factory, (IUnknown*)dev,
+ opts->window, &desc, NULL, NULL, &swapchain1);
+ if (FAILED(hr))
+ goto done;
+ hr = IDXGISwapChain1_QueryInterface(swapchain1, &IID_IDXGISwapChain,
+ (void**)&swapchain);
+ if (FAILED(hr))
+ goto done;
+
+ *swapchain_out = swapchain;
+ swapchain = NULL;
+
+done:
+ SAFE_RELEASE(swapchain1);
+ SAFE_RELEASE(swapchain);
+ return hr;
+}
+
+static HRESULT create_swapchain_1_1(ID3D11Device *dev, IDXGIFactory1 *factory,
+ struct mp_log *log,
+ struct d3d11_swapchain_opts *opts,
+ DXGI_FORMAT format,
+ IDXGISwapChain **swapchain_out)
+{
+ DXGI_SWAP_CHAIN_DESC desc = {
+ .BufferDesc = {
+ .Width = opts->width ? opts->width : 1,
+ .Height = opts->height ? opts->height : 1,
+ .Format = format,
+ },
+ .SampleDesc = { .Count = 1 },
+ .BufferUsage = opts->usage,
+ .BufferCount = 1,
+ .OutputWindow = opts->window,
+ .Windowed = TRUE,
+ .SwapEffect = DXGI_SWAP_EFFECT_DISCARD,
+ };
+
+ return IDXGIFactory1_CreateSwapChain(factory, (IUnknown*)dev, &desc,
+ swapchain_out);
+}
+
+static bool update_swapchain_format(struct mp_log *log,
+ IDXGISwapChain *swapchain,
+ DXGI_FORMAT format)
+{
+ DXGI_SWAP_CHAIN_DESC desc;
+
+ HRESULT hr = IDXGISwapChain_GetDesc(swapchain, &desc);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to query swap chain's current state: %s\n",
+ mp_HRESULT_to_str(hr));
+ return false;
+ }
+
+ hr = IDXGISwapChain_ResizeBuffers(swapchain, 0, desc.BufferDesc.Width,
+ desc.BufferDesc.Height,
+ format, 0);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Couldn't update swapchain format: %s\n",
+ mp_HRESULT_to_str(hr));
+ return false;
+ }
+
+ return true;
+}
+
+static bool update_swapchain_color_space(struct mp_log *log,
+ IDXGISwapChain *swapchain,
+ DXGI_COLOR_SPACE_TYPE color_space)
+{
+ IDXGISwapChain4 *swapchain4 = NULL;
+ const char *csp_name = d3d11_get_csp_name(color_space);
+ bool success = false;
+ HRESULT hr = E_FAIL;
+ unsigned int csp_support_flags;
+
+ hr = IDXGISwapChain_QueryInterface(swapchain, &IID_IDXGISwapChain4,
+ (void *)&(swapchain4));
+ if (FAILED(hr)) {
+ mp_err(log, "Failed to create v4 swapchain for color space "
+ "configuration (%s)!\n",
+ mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ hr = IDXGISwapChain4_CheckColorSpaceSupport(swapchain4,
+ color_space,
+ &csp_support_flags);
+ if (FAILED(hr)) {
+ mp_err(log, "Failed to check color space support for color space "
+ "%s (%d): %s!\n",
+ csp_name, color_space, mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ mp_verbose(log,
+ "Swapchain capabilities for color space %s (%d): "
+ "normal: %s, overlay: %s\n",
+ csp_name, color_space,
+ (csp_support_flags & DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_PRESENT) ?
+ "yes" : "no",
+ (csp_support_flags & DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_OVERLAY_PRESENT) ?
+ "yes" : "no");
+
+ if (!(csp_support_flags & DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_PRESENT)) {
+ mp_err(log, "Color space %s (%d) is not supported by this swapchain!\n",
+ csp_name, color_space);
+ goto done;
+ }
+
+ hr = IDXGISwapChain4_SetColorSpace1(swapchain4, color_space);
+ if (FAILED(hr)) {
+ mp_err(log, "Failed to set color space %s (%d) for this swapchain "
+ "(%s)!\n",
+ csp_name, color_space, mp_HRESULT_to_str(hr));
+ goto done;
+ }
+
+ mp_verbose(log, "Swapchain successfully configured to color space %s (%d)!\n",
+ csp_name, color_space);
+
+ success = true;
+
+done:
+ SAFE_RELEASE(swapchain4);
+ return success;
+}
+
+static bool configure_created_swapchain(struct mp_log *log,
+ IDXGISwapChain *swapchain,
+ DXGI_FORMAT requested_format,
+ DXGI_COLOR_SPACE_TYPE requested_csp,
+ struct mp_colorspace *configured_csp)
+{
+ DXGI_FORMAT probed_format = DXGI_FORMAT_UNKNOWN;
+ DXGI_FORMAT selected_format = DXGI_FORMAT_UNKNOWN;
+ DXGI_COLOR_SPACE_TYPE probed_colorspace = DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709;
+ DXGI_COLOR_SPACE_TYPE selected_colorspace;
+ const char *format_name = NULL;
+ const char *csp_name = NULL;
+ struct mp_colorspace mp_csp = { 0 };
+ bool mp_csp_mapped = false;
+
+ query_output_format_and_colorspace(log, swapchain,
+ &probed_format,
+ &probed_colorspace);
+
+
+ selected_format = requested_format != DXGI_FORMAT_UNKNOWN ?
+ requested_format :
+ (probed_format != DXGI_FORMAT_UNKNOWN ?
+ probed_format : DXGI_FORMAT_R8G8B8A8_UNORM);
+ selected_colorspace = requested_csp != -1 ?
+ requested_csp : probed_colorspace;
+ format_name = d3d11_get_format_name(selected_format);
+ csp_name = d3d11_get_csp_name(selected_colorspace);
+ mp_csp_mapped = d3d11_get_mp_csp(selected_colorspace, &mp_csp);
+
+ mp_verbose(log, "Selected swapchain format %s (%d), attempting "
+ "to utilize it.\n",
+ format_name, selected_format);
+
+ if (!update_swapchain_format(log, swapchain, selected_format)) {
+ return false;
+ }
+
+ if (!IsWindows10OrGreater()) {
+ // On older than Windows 10, query_output_format_and_colorspace
+ // will not change probed_colorspace, and even if a user sets
+ // a colorspace it will not get applied. Thus warn user in case a
+ // value was specifically set and finish.
+ if (requested_csp != -1) {
+ mp_warn(log, "User selected a D3D11 color space %s (%d), "
+ "but configuration of color spaces is only supported"
+ "from Windows 10! The default configuration has been "
+ "left as-is.\n",
+ csp_name, selected_colorspace);
+ }
+
+ return true;
+ }
+
+ if (!mp_csp_mapped) {
+ mp_warn(log, "Color space %s (%d) does not have an mpv color space "
+ "mapping! Overriding to standard sRGB!\n",
+ csp_name, selected_colorspace);
+ selected_colorspace = DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709;
+ d3d11_get_mp_csp(selected_colorspace, &mp_csp);
+ }
+
+ mp_verbose(log, "Selected swapchain color space %s (%d), attempting to "
+ "utilize it.\n",
+ csp_name, selected_colorspace);
+
+ if (!update_swapchain_color_space(log, swapchain, selected_colorspace)) {
+ return false;
+ }
+
+ if (configured_csp) {
+ *configured_csp = mp_csp;
+ }
+
+ return true;
+}
+
+// Create a Direct3D 11 swapchain
+bool mp_d3d11_create_swapchain(ID3D11Device *dev, struct mp_log *log,
+ struct d3d11_swapchain_opts *opts,
+ IDXGISwapChain **swapchain_out)
+{
+ IDXGIDevice1 *dxgi_dev = NULL;
+ IDXGIAdapter1 *adapter = NULL;
+ IDXGIFactory1 *factory = NULL;
+ IDXGIFactory2 *factory2 = NULL;
+ IDXGISwapChain *swapchain = NULL;
+ bool success = false;
+ HRESULT hr;
+
+ hr = ID3D11Device_QueryInterface(dev, &IID_IDXGIDevice1, (void**)&dxgi_dev);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get DXGI device\n");
+ goto done;
+ }
+ hr = IDXGIDevice1_GetParent(dxgi_dev, &IID_IDXGIAdapter1, (void**)&adapter);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get DXGI adapter\n");
+ goto done;
+ }
+ hr = IDXGIAdapter1_GetParent(adapter, &IID_IDXGIFactory1, (void**)&factory);
+ if (FAILED(hr)) {
+ mp_fatal(log, "Failed to get DXGI factory\n");
+ goto done;
+ }
+ hr = IDXGIFactory1_QueryInterface(factory, &IID_IDXGIFactory2,
+ (void**)&factory2);
+ if (FAILED(hr))
+ factory2 = NULL;
+
+ bool flip = factory2 && opts->flip;
+
+ // Return here to retry creating the swapchain
+ do {
+ if (factory2) {
+ // Create a DXGI 1.2+ (Windows 8+) swap chain if possible
+ hr = create_swapchain_1_2(dev, factory2, log, opts, flip,
+ DXGI_FORMAT_R8G8B8A8_UNORM, &swapchain);
+ } else {
+ // Fall back to DXGI 1.1 (Windows 7)
+ hr = create_swapchain_1_1(dev, factory, log, opts,
+ DXGI_FORMAT_R8G8B8A8_UNORM, &swapchain);
+ }
+ if (SUCCEEDED(hr))
+ break;
+
+ if (flip) {
+ mp_dbg(log, "Failed to create flip-model swapchain, trying bitblt\n");
+ flip = false;
+ continue;
+ }
+
+ mp_fatal(log, "Failed to create swapchain: %s\n", mp_HRESULT_to_str(hr));
+ goto done;
+ } while (true);
+
+ // Prevent DXGI from making changes to the VO window, otherwise it will
+ // hook the Alt+Enter keystroke and make it trigger an ugly transition to
+ // exclusive fullscreen mode instead of running the user-set command.
+ IDXGIFactory_MakeWindowAssociation(factory, opts->window,
+ DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER |
+ DXGI_MWA_NO_PRINT_SCREEN);
+
+ if (factory2) {
+ mp_verbose(log, "Using DXGI 1.2+\n");
+ } else {
+ mp_verbose(log, "Using DXGI 1.1\n");
+ }
+
+ configure_created_swapchain(log, swapchain, opts->format,
+ opts->color_space,
+ opts->configured_csp);
+
+ DXGI_SWAP_CHAIN_DESC scd = {0};
+ IDXGISwapChain_GetDesc(swapchain, &scd);
+ if (scd.SwapEffect == DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL ||
+ scd.SwapEffect == DXGI_SWAP_EFFECT_FLIP_DISCARD)
+ {
+ mp_verbose(log, "Using flip-model presentation\n");
+ } else {
+ mp_verbose(log, "Using bitblt-model presentation\n");
+ }
+
+ *swapchain_out = swapchain;
+ swapchain = NULL;
+ success = true;
+
+done:
+ SAFE_RELEASE(swapchain);
+ SAFE_RELEASE(factory2);
+ SAFE_RELEASE(factory);
+ SAFE_RELEASE(adapter);
+ SAFE_RELEASE(dxgi_dev);
+ return success;
+}
diff --git a/video/out/gpu/d3d11_helpers.h b/video/out/gpu/d3d11_helpers.h
new file mode 100644
index 0000000..c115d33
--- /dev/null
+++ b/video/out/gpu/d3d11_helpers.h
@@ -0,0 +1,103 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MP_D3D11_HELPERS_H_
+#define MP_D3D11_HELPERS_H_
+
+#include <stdbool.h>
+#include <windows.h>
+#include <d3d11.h>
+#include <dxgi1_2.h>
+
+#include "video/mp_image.h"
+
+#define D3D_FEATURE_LEVEL_12_0 (0xc000)
+#define D3D_FEATURE_LEVEL_12_1 (0xc100)
+
+#define DXGI_COLOR_SPACE_RGB_STUDIO_G24_NONE_P709 ((DXGI_COLOR_SPACE_TYPE)20)
+#define DXGI_COLOR_SPACE_RGB_STUDIO_G24_NONE_P2020 ((DXGI_COLOR_SPACE_TYPE)21)
+#define DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_LEFT_P709 ((DXGI_COLOR_SPACE_TYPE)22)
+#define DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_LEFT_P2020 ((DXGI_COLOR_SPACE_TYPE)23)
+#define DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_TOPLEFT_P2020 ((DXGI_COLOR_SPACE_TYPE)24)
+
+struct d3d11_device_opts {
+ // Enable the debug layer (D3D11_CREATE_DEVICE_DEBUG)
+ bool debug;
+
+ // Allow a software (WARP) adapter. Note, sometimes a software adapter will
+ // be used even when allow_warp is false. This is because, on Windows 8 and
+ // up, if there are no hardware adapters, Windows will pretend the WARP
+ // adapter is the primary hardware adapter.
+ bool allow_warp;
+
+ // Always use a WARP adapter. This is mainly for testing purposes.
+ bool force_warp;
+
+ // The maximum number of pending frames allowed to be queued to a swapchain
+ int max_frame_latency;
+
+ // The maximum Direct3D 11 feature level to attempt to create
+ // If unset, defaults to D3D_FEATURE_LEVEL_11_0
+ int max_feature_level;
+
+ // The minimum Direct3D 11 feature level to attempt to create. If this is
+ // not supported, device creation will fail.
+ // If unset, defaults to D3D_FEATURE_LEVEL_9_1
+ int min_feature_level;
+
+ // The adapter name to utilize if a specific adapter is required
+ // If unset, the default adapter will be utilized when creating
+ // a device.
+ char *adapter_name;
+};
+
+bool mp_d3d11_list_or_verify_adapters(struct mp_log *log,
+ bstr adapter_name,
+ bstr *listing);
+
+bool mp_d3d11_create_present_device(struct mp_log *log,
+ struct d3d11_device_opts *opts,
+ ID3D11Device **dev_out);
+
+struct d3d11_swapchain_opts {
+ HWND window;
+ int width;
+ int height;
+ DXGI_FORMAT format;
+ DXGI_COLOR_SPACE_TYPE color_space;
+
+ // mp_colorspace mapping of the configured swapchain colorspace
+ // shall be written into this memory location if configuration
+ // succeeds. Will be ignored if NULL.
+ struct mp_colorspace *configured_csp;
+
+ // Use DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL if possible
+ bool flip;
+
+ // Number of surfaces in the swapchain
+ int length;
+
+ // The BufferUsage value for swapchain surfaces. This should probably
+ // contain DXGI_USAGE_RENDER_TARGET_OUTPUT.
+ DXGI_USAGE usage;
+};
+
+bool mp_d3d11_create_swapchain(ID3D11Device *dev, struct mp_log *log,
+ struct d3d11_swapchain_opts *opts,
+ IDXGISwapChain **swapchain_out);
+
+#endif
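
As a rough illustration of how the two entry points compose, a hedged sketch follows; the log and hwnd variables and the specific values (frame latency, swapchain length, usage flags) are assumptions for illustration, not values taken from this patch:

    // Sketch only: create a device, then a swapchain on an existing window.
    struct mp_colorspace csp = {0};
    ID3D11Device *dev = NULL;
    IDXGISwapChain *swapchain = NULL;

    struct d3d11_device_opts dev_opts = {
        .allow_warp = true,      // fall back to WARP if no hardware adapter works
        .max_frame_latency = 3,  // illustrative value
        // max/min_feature_level left at 0 -> helper defaults (11_0 down to 9_1)
    };
    if (!mp_d3d11_create_present_device(log, &dev_opts, &dev))
        return false;

    struct d3d11_swapchain_opts sc_opts = {
        .window = hwnd,                  // assumed pre-existing window handle
        .width = 1280, .height = 720,
        .format = DXGI_FORMAT_UNKNOWN,   // let the helper probe the output format
        .color_space = -1,               // likewise, probe the output color space
        .configured_csp = &csp,
        .flip = true,
        .length = 6,                     // illustrative swapchain depth
        .usage = DXGI_USAGE_RENDER_TARGET_OUTPUT,
    };
    if (!mp_d3d11_create_swapchain(dev, log, &sc_opts, &swapchain))
        return false;
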
diff --git a/video/out/gpu/error_diffusion.c b/video/out/gpu/error_diffusion.c
new file mode 100644
index 0000000..c1ea542
--- /dev/null
+++ b/video/out/gpu/error_diffusion.c
@@ -0,0 +1,316 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+
+#include "error_diffusion.h"
+
+#include "common/common.h"
+
+#define GLSL(...) gl_sc_addf(sc, __VA_ARGS__)
+#define GLSLH(...) gl_sc_haddf(sc, __VA_ARGS__)
+
+// After a (y, x) -> (y, x + y * shift) mapping, find the rightmost column that
+// will be affected by the current column.
+static int compute_rightmost_shifted_column(const struct error_diffusion_kernel *k)
+{
+ int ret = 0;
+ for (int y = 0; y <= EF_MAX_DELTA_Y; y++) {
+ for (int x = EF_MIN_DELTA_X; x <= EF_MAX_DELTA_X; x++) {
+ if (k->pattern[y][x - EF_MIN_DELTA_X] != 0) {
+ int shifted_x = x + y * k->shift;
+
+ // The shift mapping guarantees current column (or left of it)
+ // won't be affected by error diffusion.
+ assert(shifted_x > 0);
+
+ ret = MPMAX(ret, shifted_x);
+ }
+ }
+ }
+ return ret;
+}
+
+const struct error_diffusion_kernel *mp_find_error_diffusion_kernel(const char *name)
+{
+ if (!name)
+ return NULL;
+ for (const struct error_diffusion_kernel *k = mp_error_diffusion_kernels;
+ k->name;
+ k++) {
+ if (strcmp(k->name, name) == 0)
+ return k;
+ }
+ return NULL;
+}
+
+int mp_ef_compute_shared_memory_size(const struct error_diffusion_kernel *k,
+ int height)
+{
+ // We add EF_MAX_DELTA_Y empty lines on the bottom to handle errors
+ // propagated out from the bottom side.
+ int rows = height + EF_MAX_DELTA_Y;
+ int shifted_columns = compute_rightmost_shifted_column(k) + 1;
+
+ // The shared memory is an array of size rows*shifted_columns. Each element
+ // is a single uint holding the three RGB components.
+ return rows * shifted_columns * 4;
+}
+
+void pass_error_diffusion(struct gl_shader_cache *sc,
+ const struct error_diffusion_kernel *k,
+ int tex, int width, int height, int depth, int block_size)
+{
+ assert(block_size <= height);
+
+ // The parallel error diffusion works by applying the shift mapping first.
+ // Take the Floyd-Steinberg algorithm as an example. After applying
+ // the (y, x) -> (y, x + y * shift) mapping (with shift=2), all errors are
+ // propagated into the next few columns, which makes parallel processing on
+ // the same column possible.
+ //
+ // X 7/16 X 7/16
+ // 3/16 5/16 1/16 ==> 0 0 3/16 5/16 1/16
+
+ // Figuring out the size of the rectangle containing all shifted pixels.
+ // The rectangle height is not changed.
+ int shifted_width = width + (height - 1) * k->shift;
+
+ // We process all pixels from the shifted rectangle column by column, with
+ // a single global work group of size |block_size|.
+ // Figuring out how many blocks are required to process all pixels. We need
+ // this explicitly to make the number of barrier() calls match.
+ int blocks = (height * shifted_width + block_size - 1) / block_size;
+
+ // Since we know how many of the next columns will be affected while the
+ // current column is being processed, we only need to store errors for a few
+ // columns in the shared memory. Using a ring buffer further saves cost while
+ // iterating to the next column.
+ int ring_buffer_rows = height + EF_MAX_DELTA_Y;
+ int ring_buffer_columns = compute_rightmost_shifted_column(k) + 1;
+ int ring_buffer_size = ring_buffer_rows * ring_buffer_columns;
+
+ // Defines the ring buffer in shared memory.
+ GLSLH("shared uint err_rgb8[%d];\n", ring_buffer_size);
+
+ // Initialize the ring buffer.
+ GLSL("for (int i = int(gl_LocalInvocationIndex); i < %d; i += %d) ",
+ ring_buffer_size, block_size);
+ GLSL("err_rgb8[i] = 0u;\n");
+
+ GLSL("for (int block_id = 0; block_id < %d; ++block_id) {\n", blocks);
+
+ // Add a barrier here so the previous block is fully processed before
+ // starting on the next one.
+ GLSL("groupMemoryBarrier();\n");
+ GLSL("barrier();\n");
+
+ // Compute the coordinate of the pixel we are currently processing, both
+ // before and after the shift mapping.
+ GLSL("int id = int(gl_LocalInvocationIndex) + block_id * %d;\n", block_size);
+ GLSL("int y = id %% %d, x_shifted = id / %d;\n", height, height);
+ GLSL("int x = x_shifted - y * %d;\n", k->shift);
+
+ // Proceed only if we are processing a valid pixel.
+ GLSL("if (0 <= x && x < %d) {\n", width);
+
+ // The index of the current pixel in the ring buffer.
+ GLSL("int idx = (x_shifted * %d + y) %% %d;\n", ring_buffer_rows, ring_buffer_size);
+
+ // Fetch the current pixel.
+ GLSL("vec3 pix = texelFetch(texture%d, ivec2(x, y), 0).rgb;\n", tex);
+
+ // The dithering will quantize pixel values into multiples of 1/dither_quant.
+ int dither_quant = (1 << depth) - 1;
+
+ // We encode the errors of the RGB components into a single 32-bit unsigned
+ // integer. The error we propagate from the current pixel is in the range of
+ // [-0.5 / dither_quant, 0.5 / dither_quant]. While not quite obvious, the
+ // sum of all errors propagated into a pixel is also in the same range. It's
+ // possible to map errors in this range into [-127, 127], and store each one
+ // in an unsigned 8-bit integer (using standard two's complement). The three
+ // 8-bit unsigned integers can then be encoded into a single 32-bit unsigned
+ // integer, with two 4-bit paddings to prevent addition overflows from
+ // affecting the other components. There are at most 12 addition operations
+ // on each pixel, so 4-bit padding should be enough. The overflow from the R
+ // component is simply discarded.
+ //
+ // The following figure shows how the encoding looks.
+ //
+ // +------------------------------------+
+ // |RRRRRRRR|0000|GGGGGGGG|0000|BBBBBBBB|
+ // +------------------------------------+
+ //
+
+ // The bitshift position for R and G component.
+ int bitshift_r = 24, bitshift_g = 12;
+ // The multiplier we use to map [-0.5, 0.5] to [-127, 127].
+ int uint8_mul = 127 * 2;
+
+ // Add the error previously propagated into the current pixel, and clear it
+ // in the buffer.
+ GLSL("uint err_u32 = err_rgb8[idx] + %uu;\n",
+ (128u << bitshift_r) | (128u << bitshift_g) | 128u);
+ GLSL("pix = pix * %d.0 + vec3("
+ "int((err_u32 >> %d) & 255u) - 128,"
+ "int((err_u32 >> %d) & 255u) - 128,"
+ "int( err_u32 & 255u) - 128"
+ ") / %d.0;\n", dither_quant, bitshift_r, bitshift_g, uint8_mul);
+ GLSL("err_rgb8[idx] = 0u;\n");
+
+ // Write the dithered pixel.
+ GLSL("vec3 dithered = round(pix);\n");
+ GLSL("imageStore(out_image, ivec2(x, y), vec4(dithered / %d.0, 0.0));\n",
+ dither_quant);
+
+ GLSL("vec3 err_divided = (pix - dithered) * %d.0 / %d.0;\n",
+ uint8_mul, k->divisor);
+ GLSL("ivec3 tmp;\n");
+
+ // Group error propagations with the same weight factor together, in order
+ // to reduce the number of error encoding operations.
+ for (int dividend = 1; dividend <= k->divisor; dividend++) {
+ bool err_assigned = false;
+
+ for (int y = 0; y <= EF_MAX_DELTA_Y; y++) {
+ for (int x = EF_MIN_DELTA_X; x <= EF_MAX_DELTA_X; x++) {
+ if (k->pattern[y][x - EF_MIN_DELTA_X] != dividend)
+ continue;
+
+ if (!err_assigned) {
+ err_assigned = true;
+
+ GLSL("tmp = ivec3(round(err_divided * %d.0));\n", dividend);
+
+ GLSL("err_u32 = "
+ "(uint(tmp.r & 255) << %d)|"
+ "(uint(tmp.g & 255) << %d)|"
+ " uint(tmp.b & 255);\n",
+ bitshift_r, bitshift_g);
+ }
+
+ int shifted_x = x + y * k->shift;
+
+ // Unlike the right border, errors propagated out from left
+ // border will remain in the ring buffer. This will produce
+ // visible artifacts near the left border, especially for
+ // shift=3 kernels.
+ if (x < 0)
+ GLSL("if (x >= %d) ", -x);
+
+ // Calculate the new position in the ring buffer to propagate
+ // the error into.
+ int ring_buffer_delta = shifted_x * ring_buffer_rows + y;
+ GLSL("atomicAdd(err_rgb8[(idx + %d) %% %d], err_u32);\n",
+ ring_buffer_delta, ring_buffer_size);
+ }
+ }
+ }
+
+ GLSL("}\n"); // if (0 <= x && x < width)
+
+ GLSL("}\n"); // block_id
+}
+
+// Different kernels for error diffusion.
+// Patterns are from http://www.efg2.com/Lab/Library/ImageProcessing/DHALF.TXT
+const struct error_diffusion_kernel mp_error_diffusion_kernels[] = {
+ {
+ .name = "simple",
+ .shift = 1,
+ .pattern = {{0, 0, 0, 1, 0},
+ {0, 0, 1, 0, 0},
+ {0, 0, 0, 0, 0}},
+ .divisor = 2
+ },
+ {
+ // The "false" Floyd-Steinberg kernel
+ .name = "false-fs",
+ .shift = 1,
+ .pattern = {{0, 0, 0, 3, 0},
+ {0, 0, 3, 2, 0},
+ {0, 0, 0, 0, 0}},
+ .divisor = 8
+ },
+ {
+ .name = "sierra-lite",
+ .shift = 2,
+ .pattern = {{0, 0, 0, 2, 0},
+ {0, 1, 1, 0, 0},
+ {0, 0, 0, 0, 0}},
+ .divisor = 4
+ },
+ {
+ .name = "floyd-steinberg",
+ .shift = 2,
+ .pattern = {{0, 0, 0, 7, 0},
+ {0, 3, 5, 1, 0},
+ {0, 0, 0, 0, 0}},
+ .divisor = 16
+ },
+ {
+ .name = "atkinson",
+ .shift = 2,
+ .pattern = {{0, 0, 0, 1, 1},
+ {0, 1, 1, 1, 0},
+ {0, 0, 1, 0, 0}},
+ .divisor = 8
+ },
+ // All kernels below have a shift value of 3, and are probably too heavy for
+ // low-end GPUs.
+ {
+ .name = "jarvis-judice-ninke",
+ .shift = 3,
+ .pattern = {{0, 0, 0, 7, 5},
+ {3, 5, 7, 5, 3},
+ {1, 3, 5, 3, 1}},
+ .divisor = 48
+ },
+ {
+ .name = "stucki",
+ .shift = 3,
+ .pattern = {{0, 0, 0, 8, 4},
+ {2, 4, 8, 4, 2},
+ {1, 2, 4, 2, 1}},
+ .divisor = 42
+ },
+ {
+ .name = "burkes",
+ .shift = 3,
+ .pattern = {{0, 0, 0, 8, 4},
+ {2, 4, 8, 4, 2},
+ {0, 0, 0, 0, 0}},
+ .divisor = 32
+ },
+ {
+ .name = "sierra-3",
+ .shift = 3,
+ .pattern = {{0, 0, 0, 5, 3},
+ {2, 4, 5, 4, 2},
+ {0, 2, 3, 2, 0}},
+ .divisor = 32
+ },
+ {
+ .name = "sierra-2",
+ .shift = 3,
+ .pattern = {{0, 0, 0, 4, 3},
+ {1, 2, 3, 2, 1},
+ {0, 0, 0, 0, 0}},
+ .divisor = 16
+ },
+ {0}
+};
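For illustration, a minimal standalone sketch (not part of this patch) of the bias/pack/unpack arithmetic used for the shared error buffer above, assuming the same 24/12-bit shift positions as the generated shader:

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone illustration only: pack three signed 8-bit error values into
     * one 32-bit word using the |RRRRRRRR|0000|GGGGGGGG|0000|BBBBBBBB| layout
     * shown above, then recover them with the same +128 bias / mask / -128
     * steps the generated shader performs when it reads err_rgb8[idx]. */
    int main(void)
    {
        const int bitshift_r = 24, bitshift_g = 12;
        int err_r = -5, err_g = 17, err_b = -100; /* example per-channel errors */

        uint32_t packed = ((uint32_t)(uint8_t)err_r << bitshift_r) |
                          ((uint32_t)(uint8_t)err_g << bitshift_g) |
                           (uint32_t)(uint8_t)err_b;

        /* Reading side: add the bias constant, extract each byte, unbias. */
        uint32_t biased = packed +
            ((128u << bitshift_r) | (128u << bitshift_g) | 128u);
        int r = (int)((biased >> bitshift_r) & 255u) - 128;
        int g = (int)((biased >> bitshift_g) & 255u) - 128;
        int b = (int)( biased                & 255u) - 128;

        printf("r=%d g=%d b=%d\n", r, g, b); /* prints r=-5 g=17 b=-100 */
        return 0;
    }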
diff --git a/video/out/gpu/error_diffusion.h b/video/out/gpu/error_diffusion.h
new file mode 100644
index 0000000..6bdcea1
--- /dev/null
+++ b/video/out/gpu/error_diffusion.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MP_GL_ERROR_DIFFUSION
+#define MP_GL_ERROR_DIFFUSION
+
+#include "shader_cache.h"
+
+// Defines the bounds of all error diffusion kernel patterns.
+#define EF_MIN_DELTA_X (-2)
+#define EF_MAX_DELTA_X (2)
+#define EF_MAX_DELTA_Y (2)
+
+struct error_diffusion_kernel {
+ const char *name;
+
+    // The minimum value such that a (y, x) -> (y, x + y * shift) mapping will
+    // make all error pushing operations affect only the next column and later ones.
+ int shift;
+
+ // The diffusion factor for (y, x) is pattern[y][x - EF_MIN_DELTA_X] / divisor.
+ int pattern[EF_MAX_DELTA_Y + 1][EF_MAX_DELTA_X - EF_MIN_DELTA_X + 1];
+ int divisor;
+};
+
+extern const struct error_diffusion_kernel mp_error_diffusion_kernels[];
+
+const struct error_diffusion_kernel *mp_find_error_diffusion_kernel(const char *name);
+int mp_ef_compute_shared_memory_size(const struct error_diffusion_kernel *k, int height);
+void pass_error_diffusion(struct gl_shader_cache *sc,
+ const struct error_diffusion_kernel *k,
+ int tex, int width, int height, int depth, int block_size);
+
+#endif /* MP_GL_ERROR_DIFFUSION */
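A hedged usage sketch for the API above (not a standalone program, and not taken from this patch): look up a kernel by name, check that the shared-memory requirement fits, then emit the pass. The concrete width/height/depth/block_size values and the capacity check are illustrative assumptions.

    /* Sketch only: 'sc', 'ra', frame_w/frame_h and the bound textures are
     * assumed to be set up by the caller. */
    const struct error_diffusion_kernel *k =
        mp_find_error_diffusion_kernel("floyd-steinberg");
    if (k) {
        int shmem = mp_ef_compute_shared_memory_size(k, frame_h);
        if (shmem <= ra->max_shared_memory) {  /* assumed capacity field */
            pass_error_diffusion(sc, k, /*tex=*/0, frame_w, frame_h,
                                 /*depth=*/8, /*block_size=*/512);
        }
    }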
diff --git a/video/out/gpu/hwdec.c b/video/out/gpu/hwdec.c
new file mode 100644
index 0000000..c8098f3
--- /dev/null
+++ b/video/out/gpu/hwdec.c
@@ -0,0 +1,358 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "config.h"
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "options/m_config.h"
+#include "hwdec.h"
+
+extern const struct ra_hwdec_driver ra_hwdec_vaapi;
+extern const struct ra_hwdec_driver ra_hwdec_videotoolbox;
+extern const struct ra_hwdec_driver ra_hwdec_vdpau;
+extern const struct ra_hwdec_driver ra_hwdec_dxva2egl;
+extern const struct ra_hwdec_driver ra_hwdec_d3d11egl;
+extern const struct ra_hwdec_driver ra_hwdec_dxva2gldx;
+extern const struct ra_hwdec_driver ra_hwdec_d3d11va;
+extern const struct ra_hwdec_driver ra_hwdec_dxva2dxgi;
+extern const struct ra_hwdec_driver ra_hwdec_cuda;
+extern const struct ra_hwdec_driver ra_hwdec_rpi_overlay;
+extern const struct ra_hwdec_driver ra_hwdec_drmprime;
+extern const struct ra_hwdec_driver ra_hwdec_drmprime_overlay;
+extern const struct ra_hwdec_driver ra_hwdec_aimagereader;
+extern const struct ra_hwdec_driver ra_hwdec_vulkan;
+
+const struct ra_hwdec_driver *const ra_hwdec_drivers[] = {
+#if HAVE_VAAPI
+ &ra_hwdec_vaapi,
+#endif
+#if HAVE_VIDEOTOOLBOX_GL || HAVE_IOS_GL || HAVE_VIDEOTOOLBOX_PL
+ &ra_hwdec_videotoolbox,
+#endif
+#if HAVE_D3D_HWACCEL
+ #if HAVE_EGL_ANGLE
+ &ra_hwdec_d3d11egl,
+ #if HAVE_D3D9_HWACCEL
+ &ra_hwdec_dxva2egl,
+ #endif
+ #endif
+ #if HAVE_D3D11
+ &ra_hwdec_d3d11va,
+ #if HAVE_D3D9_HWACCEL
+ &ra_hwdec_dxva2dxgi,
+ #endif
+ #endif
+#endif
+#if HAVE_GL_DXINTEROP_D3D9
+ &ra_hwdec_dxva2gldx,
+#endif
+#if HAVE_CUDA_INTEROP
+ &ra_hwdec_cuda,
+#endif
+#if HAVE_VDPAU_GL_X11
+ &ra_hwdec_vdpau,
+#endif
+#if HAVE_RPI_MMAL
+ &ra_hwdec_rpi_overlay,
+#endif
+#if HAVE_DRM
+ &ra_hwdec_drmprime,
+ &ra_hwdec_drmprime_overlay,
+#endif
+#if HAVE_ANDROID_MEDIA_NDK
+ &ra_hwdec_aimagereader,
+#endif
+#if HAVE_VULKAN_INTEROP
+ &ra_hwdec_vulkan,
+#endif
+
+ NULL
+};
+
+struct ra_hwdec *ra_hwdec_load_driver(struct ra_ctx *ra_ctx,
+ struct mp_log *log,
+ struct mpv_global *global,
+ struct mp_hwdec_devices *devs,
+ const struct ra_hwdec_driver *drv,
+ bool is_auto)
+{
+ struct ra_hwdec *hwdec = talloc(NULL, struct ra_hwdec);
+ *hwdec = (struct ra_hwdec) {
+ .driver = drv,
+ .log = mp_log_new(hwdec, log, drv->name),
+ .global = global,
+ .ra_ctx = ra_ctx,
+ .devs = devs,
+ .probing = is_auto,
+ .priv = talloc_zero_size(hwdec, drv->priv_size),
+ };
+ mp_verbose(log, "Loading hwdec driver '%s'\n", drv->name);
+ if (hwdec->driver->init(hwdec) < 0) {
+ ra_hwdec_uninit(hwdec);
+ mp_verbose(log, "Loading failed.\n");
+ return NULL;
+ }
+ return hwdec;
+}
+
+void ra_hwdec_uninit(struct ra_hwdec *hwdec)
+{
+ if (hwdec)
+ hwdec->driver->uninit(hwdec);
+ talloc_free(hwdec);
+}
+
+bool ra_hwdec_test_format(struct ra_hwdec *hwdec, int imgfmt)
+{
+ for (int n = 0; hwdec->driver->imgfmts[n]; n++) {
+ if (hwdec->driver->imgfmts[n] == imgfmt)
+ return true;
+ }
+ return false;
+}
+
+struct ra_hwdec_mapper *ra_hwdec_mapper_create(struct ra_hwdec *hwdec,
+ const struct mp_image_params *params)
+{
+ assert(ra_hwdec_test_format(hwdec, params->imgfmt));
+
+ struct ra_hwdec_mapper *mapper = talloc_ptrtype(NULL, mapper);
+ *mapper = (struct ra_hwdec_mapper){
+ .owner = hwdec,
+ .driver = hwdec->driver->mapper,
+ .log = hwdec->log,
+ .ra = hwdec->ra_ctx->ra,
+ .priv = talloc_zero_size(mapper, hwdec->driver->mapper->priv_size),
+ .src_params = *params,
+ .dst_params = *params,
+ };
+ if (mapper->driver->init(mapper) < 0)
+ ra_hwdec_mapper_free(&mapper);
+ return mapper;
+}
+
+void ra_hwdec_mapper_free(struct ra_hwdec_mapper **mapper)
+{
+ struct ra_hwdec_mapper *p = *mapper;
+ if (p) {
+ ra_hwdec_mapper_unmap(p);
+ p->driver->uninit(p);
+ talloc_free(p);
+ }
+ *mapper = NULL;
+}
+
+void ra_hwdec_mapper_unmap(struct ra_hwdec_mapper *mapper)
+{
+ if (mapper->driver->unmap)
+ mapper->driver->unmap(mapper);
+
+ // Clean up after the image if the mapper didn't already
+ mp_image_unrefp(&mapper->src);
+}
+
+int ra_hwdec_mapper_map(struct ra_hwdec_mapper *mapper, struct mp_image *img)
+{
+ ra_hwdec_mapper_unmap(mapper);
+ mp_image_setrefp(&mapper->src, img);
+ if (mapper->driver->map(mapper) < 0) {
+ ra_hwdec_mapper_unmap(mapper);
+ return -1;
+ }
+ return 0;
+}
+
+static int ra_hwdec_validate_opt_full(struct mp_log *log, bool include_modes,
+ const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ bool help = bstr_equals0(param, "help");
+ if (help)
+ mp_info(log, "Available hwdecs:\n");
+ for (int n = 0; ra_hwdec_drivers[n]; n++) {
+ const struct ra_hwdec_driver *drv = ra_hwdec_drivers[n];
+ if (help) {
+ mp_info(log, " %s\n", drv->name);
+ } else if (bstr_equals0(param, drv->name)) {
+ return 1;
+ }
+ }
+ if (help) {
+ if (include_modes) {
+ mp_info(log, " auto (behavior depends on context)\n"
+ " all (load all hwdecs)\n"
+ " no (do not load any and block loading on demand)\n");
+ }
+ return M_OPT_EXIT;
+ }
+ if (!param.len)
+ return 1; // "" is treated specially
+ if (include_modes &&
+ (bstr_equals0(param, "all") || bstr_equals0(param, "auto") ||
+ bstr_equals0(param, "no")))
+ return 1;
+ mp_fatal(log, "No hwdec backend named '%.*s' found!\n", BSTR_P(param));
+ return M_OPT_INVALID;
+}
+
+int ra_hwdec_validate_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ return ra_hwdec_validate_opt_full(log, true, opt, name, value);
+}
+
+int ra_hwdec_validate_drivers_only_opt(struct mp_log *log,
+ const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ return ra_hwdec_validate_opt_full(log, false, opt, name, value);
+}
+
+static void load_add_hwdec(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs,
+ const struct ra_hwdec_driver *drv, bool is_auto)
+{
+ // Don't load duplicate hwdecs
+ for (int j = 0; j < ctx->num_hwdecs; j++) {
+ if (ctx->hwdecs[j]->driver == drv)
+ return;
+ }
+
+ struct ra_hwdec *hwdec =
+ ra_hwdec_load_driver(ctx->ra_ctx, ctx->log, ctx->global, devs, drv, is_auto);
+ if (hwdec)
+ MP_TARRAY_APPEND(NULL, ctx->hwdecs, ctx->num_hwdecs, hwdec);
+}
+
+static void load_hwdecs_all(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs)
+{
+ if (!ctx->loading_done) {
+ for (int n = 0; ra_hwdec_drivers[n]; n++)
+ load_add_hwdec(ctx, devs, ra_hwdec_drivers[n], true);
+ ctx->loading_done = true;
+ }
+}
+
+void ra_hwdec_ctx_init(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs,
+ const char *type, bool load_all_by_default)
+{
+ assert(ctx->ra_ctx);
+
+ /*
+ * By default, or if the option value is "auto", we will not pre-emptively
+ * load any interops, and instead allow them to be loaded on-demand.
+ *
+ * If the option value is "no", then no interops will be loaded now, and
+ * no interops will be loaded, even if requested later.
+ *
+ * If the option value is "all", then all interops will be loaded now, and
+ * obviously no interops will need to be loaded later.
+ *
+ * Finally, if a specific interop is requested, it will be loaded now, and
+ * other interops can be loaded, if requested later.
+ */
+ if (!type || !type[0] || strcmp(type, "auto") == 0) {
+ if (!load_all_by_default)
+ return;
+ type = "all";
+ }
+ if (strcmp(type, "no") == 0) {
+ // do nothing, just block further loading
+ } else if (strcmp(type, "all") == 0) {
+ load_hwdecs_all(ctx, devs);
+ } else {
+ for (int n = 0; ra_hwdec_drivers[n]; n++) {
+ const struct ra_hwdec_driver *drv = ra_hwdec_drivers[n];
+ if (strcmp(type, drv->name) == 0) {
+ load_add_hwdec(ctx, devs, drv, false);
+ break;
+ }
+ }
+ }
+ ctx->loading_done = true;
+}
+
+void ra_hwdec_ctx_uninit(struct ra_hwdec_ctx *ctx)
+{
+ for (int n = 0; n < ctx->num_hwdecs; n++)
+ ra_hwdec_uninit(ctx->hwdecs[n]);
+
+ talloc_free(ctx->hwdecs);
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+void ra_hwdec_ctx_load_fmt(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs,
+ struct hwdec_imgfmt_request *params)
+{
+ int imgfmt = params->imgfmt;
+ if (ctx->loading_done) {
+ /*
+ * If we previously marked interop loading as done (for reasons
+ * discussed above), then do not load any other interops regardless
+ * of imgfmt.
+ */
+ return;
+ }
+
+ if (imgfmt == IMGFMT_NONE) {
+ MP_VERBOSE(ctx, "Loading hwdec drivers for all formats\n");
+ load_hwdecs_all(ctx, devs);
+ return;
+ }
+
+ MP_VERBOSE(ctx, "Loading hwdec drivers for format: '%s'\n",
+ mp_imgfmt_to_name(imgfmt));
+ for (int i = 0; ra_hwdec_drivers[i]; i++) {
+ bool matched_fmt = false;
+ const struct ra_hwdec_driver *drv = ra_hwdec_drivers[i];
+ for (int j = 0; drv->imgfmts[j]; j++) {
+ if (imgfmt == drv->imgfmts[j]) {
+ matched_fmt = true;
+ break;
+ }
+ }
+ if (!matched_fmt) {
+ continue;
+ }
+
+ load_add_hwdec(ctx, devs, drv, params->probing);
+ }
+}
+
+struct ra_hwdec *ra_hwdec_get(struct ra_hwdec_ctx *ctx, int imgfmt)
+{
+ for (int n = 0; n < ctx->num_hwdecs; n++) {
+ if (ra_hwdec_test_format(ctx->hwdecs[n], imgfmt))
+ return ctx->hwdecs[n];
+ }
+
+ return NULL;
+}
+
+int ra_hwdec_driver_get_imgfmt_for_name(const char *name)
+{
+ for (int i = 0; ra_hwdec_drivers[i]; i++) {
+ if (!strcmp(ra_hwdec_drivers[i]->name, name)) {
+ return ra_hwdec_drivers[i]->imgfmts[0];
+ }
+ }
+ return IMGFMT_NONE;
+}
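For orientation, a hedged sketch of how a VO might wire these entry points together; the surrounding variables (log, global, ra_ctx, devs, the option string, the frame format) are assumptions about the caller, not part of this file:

    /* Sketch only, with assumed caller state. */
    struct ra_hwdec_ctx hwctx = {
        .log = log, .global = global, .ra_ctx = ra_ctx,
    };
    ra_hwdec_ctx_init(&hwctx, devs, hwdec_interop_opt, false);

    /* Later, when a hardware frame format shows up: */
    ra_hwdec_ctx_load_fmt(&hwctx, devs, &(struct hwdec_imgfmt_request){
        .imgfmt = frame_imgfmt,
        .probing = true,
    });
    struct ra_hwdec *hw = ra_hwdec_get(&hwctx, frame_imgfmt);

    /* On shutdown: */
    ra_hwdec_ctx_uninit(&hwctx);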
diff --git a/video/out/gpu/hwdec.h b/video/out/gpu/hwdec.h
new file mode 100644
index 0000000..7766073
--- /dev/null
+++ b/video/out/gpu/hwdec.h
@@ -0,0 +1,156 @@
+#ifndef MPGL_HWDEC_H_
+#define MPGL_HWDEC_H_
+
+#include "video/mp_image.h"
+#include "context.h"
+#include "ra.h"
+#include "video/hwdec.h"
+
+// Helper to organize/load hwdecs dynamically
+struct ra_hwdec_ctx {
+ // Set these before calling `ra_hwdec_ctx_init`
+ struct mp_log *log;
+ struct mpv_global *global;
+ struct ra_ctx *ra_ctx;
+
+ bool loading_done;
+ struct ra_hwdec **hwdecs;
+ int num_hwdecs;
+};
+
+int ra_hwdec_validate_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value);
+
+int ra_hwdec_validate_drivers_only_opt(struct mp_log *log,
+ const m_option_t *opt,
+ struct bstr name, const char **value);
+
+void ra_hwdec_ctx_init(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs,
+ const char *opt, bool load_all_by_default);
+void ra_hwdec_ctx_uninit(struct ra_hwdec_ctx *ctx);
+
+void ra_hwdec_ctx_load_fmt(struct ra_hwdec_ctx *ctx, struct mp_hwdec_devices *devs,
+ struct hwdec_imgfmt_request *params);
+
+// Gets the right `ra_hwdec` for a format, if any
+struct ra_hwdec *ra_hwdec_get(struct ra_hwdec_ctx *ctx, int imgfmt);
+
+struct ra_hwdec {
+ const struct ra_hwdec_driver *driver;
+ struct mp_log *log;
+ struct mpv_global *global;
+ struct ra_ctx *ra_ctx;
+ struct mp_hwdec_devices *devs;
+ // GLSL extensions required to sample textures from this.
+ const char **glsl_extensions;
+ // For free use by hwdec driver
+ void *priv;
+ // For working around the vdpau vs. vaapi mess.
+ bool probing;
+ // Used in overlay mode only.
+ float overlay_colorkey[4];
+};
+
+struct ra_hwdec_mapper {
+ const struct ra_hwdec_mapper_driver *driver;
+ struct mp_log *log;
+ struct ra *ra;
+ void *priv;
+ struct ra_hwdec *owner;
+ // Input frame parameters. (Set before init(), immutable.)
+ struct mp_image_params src_params;
+ // Output frame parameters (represents the format the textures return). Must
+    // be set by init(), immutable afterwards.
+ struct mp_image_params dst_params;
+
+ // The currently mapped source image (or the image about to be mapped in
+ // ->map()). NULL if unmapped. The mapper can also clear this reference if
+ // the mapped textures contain a full copy.
+ struct mp_image *src;
+
+ // The mapped textures and metadata about them. These fields change if a
+ // new frame is mapped (or unmapped), but otherwise remain constant.
+ // The common code won't mess with these, so you can e.g. set them in the
+ // .init() callback.
+ struct ra_tex *tex[4];
+};
+
+// This can be used to map frames of a specific hw format as GL textures.
+struct ra_hwdec_mapper_driver {
+ // Used to create ra_hwdec_mapper.priv.
+ size_t priv_size;
+
+    // Init the mapper implementation. At this point, the fields src_params,
+    // fns, devs, and priv are initialized.
+ int (*init)(struct ra_hwdec_mapper *mapper);
+ // Destroy the mapper. unmap is called before this.
+ void (*uninit)(struct ra_hwdec_mapper *mapper);
+
+    // Map mapper->src as a texture, and set mapper->tex to textures using it.
+ // It is expected that the textures remain valid until the next unmap
+ // or uninit call.
+ // The function is allowed to unref mapper->src if it's not needed (i.e.
+ // this function creates a copy).
+ // The underlying format can change, so you might need to do some form
+ // of change detection. You also must reject unsupported formats with an
+ // error.
+    // Returns a negative value on error, in which case the mapper remains unmapped.
+ int (*map)(struct ra_hwdec_mapper *mapper);
+ // Unmap the frame. Does nothing if already unmapped. Optional.
+ void (*unmap)(struct ra_hwdec_mapper *mapper);
+};
+
+struct ra_hwdec_driver {
+ // Name of the interop backend. This is used for informational purposes and
+ // for use with debugging options.
+ const char *name;
+ // Used to create ra_hwdec.priv.
+ size_t priv_size;
+    // The hardware surface IMGFMT_ values that can be passed to map_image later.
+ // Terminated with a 0 entry. (Extend the array size as needed.)
+ const int imgfmts[3];
+
+ // Create the hwdec device. It must add it to hw->devs, if applicable.
+ int (*init)(struct ra_hwdec *hw);
+ void (*uninit)(struct ra_hwdec *hw);
+
+ // This will be used to create a ra_hwdec_mapper from ra_hwdec.
+ const struct ra_hwdec_mapper_driver *mapper;
+
+ // The following function provides an alternative API. Each ra_hwdec_driver
+    // must either provide a mapper or overlay_frame (not both, and not neither), and
+ // if overlay_frame is set, it operates in overlay mode. In this mode,
+ // OSD etc. is rendered via OpenGL, but the video is rendered as a separate
+ // layer below it.
+ // Non-overlay mode is strictly preferred, so try not to use overlay mode.
+ // Set the given frame as overlay, replacing the previous one. This can also
+ // just change the position of the overlay.
+ // hw_image==src==dst==NULL is passed to clear the overlay.
+ int (*overlay_frame)(struct ra_hwdec *hw, struct mp_image *hw_image,
+ struct mp_rect *src, struct mp_rect *dst, bool newframe);
+};
+
+extern const struct ra_hwdec_driver *const ra_hwdec_drivers[];
+
+struct ra_hwdec *ra_hwdec_load_driver(struct ra_ctx *ra_ctx,
+ struct mp_log *log,
+ struct mpv_global *global,
+ struct mp_hwdec_devices *devs,
+ const struct ra_hwdec_driver *drv,
+ bool is_auto);
+
+void ra_hwdec_uninit(struct ra_hwdec *hwdec);
+
+bool ra_hwdec_test_format(struct ra_hwdec *hwdec, int imgfmt);
+
+struct ra_hwdec_mapper *ra_hwdec_mapper_create(struct ra_hwdec *hwdec,
+ const struct mp_image_params *params);
+void ra_hwdec_mapper_free(struct ra_hwdec_mapper **mapper);
+void ra_hwdec_mapper_unmap(struct ra_hwdec_mapper *mapper);
+int ra_hwdec_mapper_map(struct ra_hwdec_mapper *mapper, struct mp_image *img);
+
+// Get the primary image format for the given driver name.
+// Returns IMGFMT_NONE if the name doesn't get matched.
+int ra_hwdec_driver_get_imgfmt_for_name(const char *name);
+
+#endif
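As a usage sketch (not taken from this patch), the mapper lifecycle implied by the declarations above is roughly: create one mapper per hardware format, map each incoming frame, render from the resulting textures, then unmap and free. 'hwdec', 'params' and 'frame' are assumed caller state.

    struct ra_hwdec_mapper *m = ra_hwdec_mapper_create(hwdec, params);
    if (!m)
        return -1;

    if (ra_hwdec_mapper_map(m, frame) >= 0) {
        /* m->tex[] now holds the plane textures described by m->dst_params. */
        /* ... sample them in a render pass ... */
        ra_hwdec_mapper_unmap(m);
    }

    ra_hwdec_mapper_free(&m);  /* unmaps first if still mapped */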
diff --git a/video/out/gpu/lcms.c b/video/out/gpu/lcms.c
new file mode 100644
index 0000000..7006a96
--- /dev/null
+++ b/video/out/gpu/lcms.c
@@ -0,0 +1,526 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <math.h>
+
+#include "mpv_talloc.h"
+
+#include "config.h"
+
+#include "stream/stream.h"
+#include "common/common.h"
+#include "misc/bstr.h"
+#include "common/msg.h"
+#include "options/m_option.h"
+#include "options/path.h"
+#include "video/csputils.h"
+#include "lcms.h"
+
+#include "osdep/io.h"
+
+#if HAVE_LCMS2
+
+#include <lcms2.h>
+#include <libavutil/sha.h>
+#include <libavutil/mem.h>
+
+struct gl_lcms {
+ void *icc_data;
+ size_t icc_size;
+ struct AVBufferRef *vid_profile;
+ char *current_profile;
+ bool using_memory_profile;
+ bool changed;
+ enum mp_csp_prim current_prim;
+ enum mp_csp_trc current_trc;
+
+ struct mp_log *log;
+ struct mpv_global *global;
+ struct mp_icc_opts *opts;
+};
+
+static void lcms2_error_handler(cmsContext ctx, cmsUInt32Number code,
+ const char *msg)
+{
+ struct gl_lcms *p = cmsGetContextUserData(ctx);
+ MP_ERR(p, "lcms2: %s\n", msg);
+}
+
+static void load_profile(struct gl_lcms *p)
+{
+ talloc_free(p->icc_data);
+ p->icc_data = NULL;
+ p->icc_size = 0;
+ p->using_memory_profile = false;
+ talloc_free(p->current_profile);
+ p->current_profile = NULL;
+
+ if (!p->opts->profile || !p->opts->profile[0])
+ return;
+
+ char *fname = mp_get_user_path(NULL, p->global, p->opts->profile);
+ MP_VERBOSE(p, "Opening ICC profile '%s'\n", fname);
+ struct bstr iccdata = stream_read_file(fname, p, p->global,
+ 100000000); // 100 MB
+ talloc_free(fname);
+ if (!iccdata.len)
+ return;
+
+ talloc_free(p->icc_data);
+
+ p->icc_data = iccdata.start;
+ p->icc_size = iccdata.len;
+ p->current_profile = talloc_strdup(p, p->opts->profile);
+}
+
+static void gl_lcms_destructor(void *ptr)
+{
+ struct gl_lcms *p = ptr;
+ av_buffer_unref(&p->vid_profile);
+}
+
+struct gl_lcms *gl_lcms_init(void *talloc_ctx, struct mp_log *log,
+ struct mpv_global *global,
+ struct mp_icc_opts *opts)
+{
+ struct gl_lcms *p = talloc_ptrtype(talloc_ctx, p);
+ talloc_set_destructor(p, gl_lcms_destructor);
+ *p = (struct gl_lcms) {
+ .global = global,
+ .log = log,
+ .opts = opts,
+ };
+ gl_lcms_update_options(p);
+ return p;
+}
+
+void gl_lcms_update_options(struct gl_lcms *p)
+{
+ if ((p->using_memory_profile && !p->opts->profile_auto) ||
+ !bstr_equals(bstr0(p->opts->profile), bstr0(p->current_profile)))
+ {
+ load_profile(p);
+ }
+
+ p->changed = true; // probably
+}
+
+// Warning: profile.start must point to a ta allocation, and the function
+// takes over ownership.
+// Returns whether the internal profile was changed.
+bool gl_lcms_set_memory_profile(struct gl_lcms *p, bstr profile)
+{
+ if (!p->opts->profile_auto || (p->opts->profile && p->opts->profile[0])) {
+ talloc_free(profile.start);
+ return false;
+ }
+
+ if (p->using_memory_profile &&
+ p->icc_data && profile.start &&
+ profile.len == p->icc_size &&
+ memcmp(profile.start, p->icc_data, p->icc_size) == 0)
+ {
+ talloc_free(profile.start);
+ return false;
+ }
+
+ p->changed = true;
+ p->using_memory_profile = true;
+
+ talloc_free(p->icc_data);
+
+ p->icc_data = talloc_steal(p, profile.start);
+ p->icc_size = profile.len;
+
+ return true;
+}
+
+// Guards against NULL and uses bstr_equals to short-circuit some special cases
+static bool vid_profile_eq(struct AVBufferRef *a, struct AVBufferRef *b)
+{
+ if (!a || !b)
+ return a == b;
+
+ return bstr_equals((struct bstr){ a->data, a->size },
+ (struct bstr){ b->data, b->size });
+}
+
+// Return whether the profile or config has changed since the last time it was
+// retrieved. If it has changed, gl_lcms_get_lut3d() should be called.
+bool gl_lcms_has_changed(struct gl_lcms *p, enum mp_csp_prim prim,
+ enum mp_csp_trc trc, struct AVBufferRef *vid_profile)
+{
+ if (p->changed || p->current_prim != prim || p->current_trc != trc)
+ return true;
+
+ return !vid_profile_eq(p->vid_profile, vid_profile);
+}
+
+// Whether a profile is set. (gl_lcms_get_lut3d() is expected to return a lut,
+// but it could still fail due to runtime errors, such as invalid icc data.)
+bool gl_lcms_has_profile(struct gl_lcms *p)
+{
+ return p->icc_size > 0;
+}
+
+static cmsHPROFILE get_vid_profile(struct gl_lcms *p, cmsContext cms,
+ cmsHPROFILE disp_profile,
+ enum mp_csp_prim prim, enum mp_csp_trc trc)
+{
+ if (p->opts->use_embedded && p->vid_profile) {
+ // Try using the embedded ICC profile
+ cmsHPROFILE prof = cmsOpenProfileFromMemTHR(cms, p->vid_profile->data,
+ p->vid_profile->size);
+ if (prof) {
+ MP_VERBOSE(p, "Successfully opened embedded ICC profile\n");
+ return prof;
+ }
+
+ // Otherwise, warn the user and generate the profile as usual
+ MP_WARN(p, "Video contained an invalid ICC profile! Ignoring...\n");
+ }
+
+ // The input profile for the transformation is dependent on the video
+ // primaries and transfer characteristics
+ struct mp_csp_primaries csp = mp_get_csp_primaries(prim);
+ cmsCIExyY wp_xyY = {csp.white.x, csp.white.y, 1.0};
+ cmsCIExyYTRIPLE prim_xyY = {
+ .Red = {csp.red.x, csp.red.y, 1.0},
+ .Green = {csp.green.x, csp.green.y, 1.0},
+ .Blue = {csp.blue.x, csp.blue.y, 1.0},
+ };
+
+ cmsToneCurve *tonecurve[3] = {0};
+ switch (trc) {
+ case MP_CSP_TRC_LINEAR: tonecurve[0] = cmsBuildGamma(cms, 1.0); break;
+ case MP_CSP_TRC_GAMMA18: tonecurve[0] = cmsBuildGamma(cms, 1.8); break;
+ case MP_CSP_TRC_GAMMA20: tonecurve[0] = cmsBuildGamma(cms, 2.0); break;
+ case MP_CSP_TRC_GAMMA22: tonecurve[0] = cmsBuildGamma(cms, 2.2); break;
+ case MP_CSP_TRC_GAMMA24: tonecurve[0] = cmsBuildGamma(cms, 2.4); break;
+ case MP_CSP_TRC_GAMMA26: tonecurve[0] = cmsBuildGamma(cms, 2.6); break;
+ case MP_CSP_TRC_GAMMA28: tonecurve[0] = cmsBuildGamma(cms, 2.8); break;
+
+ case MP_CSP_TRC_SRGB:
+ // Values copied from Little-CMS
+ tonecurve[0] = cmsBuildParametricToneCurve(cms, 4,
+ (double[5]){2.40, 1/1.055, 0.055/1.055, 1/12.92, 0.04045});
+ break;
+
+ case MP_CSP_TRC_PRO_PHOTO:
+ tonecurve[0] = cmsBuildParametricToneCurve(cms, 4,
+ (double[5]){1.8, 1.0, 0.0, 1/16.0, 0.03125});
+ break;
+
+ case MP_CSP_TRC_BT_1886: {
+ double src_black[3];
+ if (p->opts->contrast < 0) {
+ // User requested infinite contrast, return 2.4 profile
+ tonecurve[0] = cmsBuildGamma(cms, 2.4);
+ break;
+ } else if (p->opts->contrast > 0) {
+ MP_VERBOSE(p, "Using specified contrast: %d\n", p->opts->contrast);
+ for (int i = 0; i < 3; i++)
+ src_black[i] = 1.0 / p->opts->contrast;
+ } else {
+ // To build an appropriate BT.1886 transformation we need access to
+ // the display's black point, so we use LittleCMS' detection
+ // function. Relative colorimetric is used since we want to
+ // approximate the BT.1886 to the target device's actual black
+ // point even in e.g. perceptual mode
+ const int intent = MP_INTENT_RELATIVE_COLORIMETRIC;
+ cmsCIEXYZ bp_XYZ;
+ if (!cmsDetectBlackPoint(&bp_XYZ, disp_profile, intent, 0))
+ return false;
+
+ // Map this XYZ value back into the (linear) source space
+ cmsHPROFILE rev_profile;
+ cmsToneCurve *linear = cmsBuildGamma(cms, 1.0);
+ rev_profile = cmsCreateRGBProfileTHR(cms, &wp_xyY, &prim_xyY,
+ (cmsToneCurve*[3]){linear, linear, linear});
+ cmsHPROFILE xyz_profile = cmsCreateXYZProfile();
+ cmsHTRANSFORM xyz2src = cmsCreateTransformTHR(cms,
+ xyz_profile, TYPE_XYZ_DBL, rev_profile, TYPE_RGB_DBL,
+ intent, cmsFLAGS_NOCACHE | cmsFLAGS_NOOPTIMIZE);
+ cmsFreeToneCurve(linear);
+ cmsCloseProfile(rev_profile);
+ cmsCloseProfile(xyz_profile);
+ if (!xyz2src)
+ return false;
+
+ cmsDoTransform(xyz2src, &bp_XYZ, src_black, 1);
+ cmsDeleteTransform(xyz2src);
+
+ double contrast = 3.0 / (src_black[0] + src_black[1] + src_black[2]);
+ MP_VERBOSE(p, "Detected ICC profile contrast: %f\n", contrast);
+ }
+
+ // Build the parametric BT.1886 transfer curve, one per channel
+ for (int i = 0; i < 3; i++) {
+ const double gamma = 2.40;
+ double binv = pow(src_black[i], 1.0/gamma);
+ tonecurve[i] = cmsBuildParametricToneCurve(cms, 6,
+ (double[4]){gamma, 1.0 - binv, binv, 0.0});
+ }
+ break;
+ }
+
+ default:
+ abort();
+ }
+
+ if (!tonecurve[0])
+ return false;
+
+ if (!tonecurve[1]) tonecurve[1] = tonecurve[0];
+ if (!tonecurve[2]) tonecurve[2] = tonecurve[0];
+
+    cmsHPROFILE vid_profile = cmsCreateRGBProfileTHR(cms, &wp_xyY, &prim_xyY,
+ tonecurve);
+
+ if (tonecurve[2] != tonecurve[0]) cmsFreeToneCurve(tonecurve[2]);
+ if (tonecurve[1] != tonecurve[0]) cmsFreeToneCurve(tonecurve[1]);
+ cmsFreeToneCurve(tonecurve[0]);
+
+ return vid_profile;
+}
+
+bool gl_lcms_get_lut3d(struct gl_lcms *p, struct lut3d **result_lut3d,
+ enum mp_csp_prim prim, enum mp_csp_trc trc,
+ struct AVBufferRef *vid_profile)
+{
+ int s_r, s_g, s_b;
+ bool result = false;
+
+ p->changed = false;
+ p->current_prim = prim;
+ p->current_trc = trc;
+
+ // We need to hold on to a reference to the video's ICC profile for as long
+ // as we still need to perform equality checking, so generate a new
+ // reference here
+ av_buffer_unref(&p->vid_profile);
+ if (vid_profile) {
+ MP_VERBOSE(p, "Got an embedded ICC profile.\n");
+ p->vid_profile = av_buffer_ref(vid_profile);
+ MP_HANDLE_OOM(p->vid_profile);
+ }
+
+ if (!gl_parse_3dlut_size(p->opts->size_str, &s_r, &s_g, &s_b))
+ return false;
+
+ if (!gl_lcms_has_profile(p))
+ return false;
+
+ // For simplicity, default to 65x65x65, which is large enough to cover
+ // typical profiles with good accuracy while not being too wasteful
+ s_r = s_r ? s_r : 65;
+ s_g = s_g ? s_g : 65;
+ s_b = s_b ? s_b : 65;
+
+ void *tmp = talloc_new(NULL);
+ uint16_t *output = talloc_array(tmp, uint16_t, s_r * s_g * s_b * 4);
+ struct lut3d *lut = NULL;
+ cmsContext cms = NULL;
+
+ char *cache_file = NULL;
+ if (p->opts->cache) {
+ // Gamma is included in the header to help uniquely identify it,
+ // because we may change the parameter in the future or make it
+ // customizable, same for the primaries.
+ char *cache_info = talloc_asprintf(tmp,
+ "ver=1.4, intent=%d, size=%dx%dx%d, prim=%d, trc=%d, "
+ "contrast=%d\n",
+ p->opts->intent, s_r, s_g, s_b, prim, trc, p->opts->contrast);
+
+ uint8_t hash[32];
+ struct AVSHA *sha = av_sha_alloc();
+ MP_HANDLE_OOM(sha);
+ av_sha_init(sha, 256);
+ av_sha_update(sha, cache_info, strlen(cache_info));
+ if (vid_profile)
+ av_sha_update(sha, vid_profile->data, vid_profile->size);
+ av_sha_update(sha, p->icc_data, p->icc_size);
+ av_sha_final(sha, hash);
+ av_free(sha);
+
+ char *cache_dir = p->opts->cache_dir;
+ if (cache_dir && cache_dir[0]) {
+ cache_dir = mp_get_user_path(tmp, p->global, cache_dir);
+ } else {
+ cache_dir = mp_find_user_file(tmp, p->global, "cache", "");
+ }
+
+ if (cache_dir && cache_dir[0]) {
+ cache_file = talloc_strdup(tmp, "");
+ for (int i = 0; i < sizeof(hash); i++)
+ cache_file = talloc_asprintf_append(cache_file, "%02X", hash[i]);
+ cache_file = mp_path_join(tmp, cache_dir, cache_file);
+ mp_mkdirp(cache_dir);
+ }
+ }
+
+ // check cache
+ if (cache_file && stat(cache_file, &(struct stat){0}) == 0) {
+ MP_VERBOSE(p, "Opening 3D LUT cache in file '%s'.\n", cache_file);
+ struct bstr cachedata = stream_read_file(cache_file, tmp, p->global,
+ 1000000000); // 1 GB
+ if (cachedata.len == talloc_get_size(output)) {
+ memcpy(output, cachedata.start, cachedata.len);
+ goto done;
+ } else {
+ MP_WARN(p, "3D LUT cache invalid!\n");
+ }
+ }
+
+ cms = cmsCreateContext(NULL, p);
+ if (!cms)
+ goto error_exit;
+ cmsSetLogErrorHandlerTHR(cms, lcms2_error_handler);
+
+ cmsHPROFILE profile =
+ cmsOpenProfileFromMemTHR(cms, p->icc_data, p->icc_size);
+ if (!profile)
+ goto error_exit;
+
+ cmsHPROFILE vid_hprofile = get_vid_profile(p, cms, profile, prim, trc);
+ if (!vid_hprofile) {
+ cmsCloseProfile(profile);
+ goto error_exit;
+ }
+
+ cmsHTRANSFORM trafo = cmsCreateTransformTHR(cms, vid_hprofile, TYPE_RGB_16,
+ profile, TYPE_RGBA_16,
+ p->opts->intent,
+ cmsFLAGS_NOCACHE |
+ cmsFLAGS_NOOPTIMIZE |
+ cmsFLAGS_BLACKPOINTCOMPENSATION);
+ cmsCloseProfile(profile);
+ cmsCloseProfile(vid_hprofile);
+
+ if (!trafo)
+ goto error_exit;
+
+ // transform a (s_r)x(s_g)x(s_b) cube, with 3 components per channel
+ uint16_t *input = talloc_array(tmp, uint16_t, s_r * 3);
+ for (int b = 0; b < s_b; b++) {
+ for (int g = 0; g < s_g; g++) {
+ for (int r = 0; r < s_r; r++) {
+ input[r * 3 + 0] = r * 65535 / (s_r - 1);
+ input[r * 3 + 1] = g * 65535 / (s_g - 1);
+ input[r * 3 + 2] = b * 65535 / (s_b - 1);
+ }
+ size_t base = (b * s_r * s_g + g * s_r) * 4;
+ cmsDoTransform(trafo, input, output + base, s_r);
+ }
+ }
+
+ cmsDeleteTransform(trafo);
+
+ if (cache_file) {
+ FILE *out = fopen(cache_file, "wb");
+ if (out) {
+ fwrite(output, talloc_get_size(output), 1, out);
+ fclose(out);
+ }
+ }
+
+done: ;
+
+ lut = talloc_ptrtype(NULL, lut);
+ *lut = (struct lut3d) {
+ .data = talloc_steal(lut, output),
+ .size = {s_r, s_g, s_b},
+ };
+
+ *result_lut3d = lut;
+ result = true;
+
+error_exit:
+
+ if (cms)
+ cmsDeleteContext(cms);
+
+ if (!lut)
+ MP_FATAL(p, "Error loading ICC profile.\n");
+
+ talloc_free(tmp);
+ return result;
+}
+
+#else /* HAVE_LCMS2 */
+
+struct gl_lcms *gl_lcms_init(void *talloc_ctx, struct mp_log *log,
+ struct mpv_global *global,
+ struct mp_icc_opts *opts)
+{
+ return (struct gl_lcms *) talloc_new(talloc_ctx);
+}
+
+void gl_lcms_update_options(struct gl_lcms *p) { }
+bool gl_lcms_set_memory_profile(struct gl_lcms *p, bstr profile) {return false;}
+
+bool gl_lcms_has_changed(struct gl_lcms *p, enum mp_csp_prim prim,
+ enum mp_csp_trc trc, struct AVBufferRef *vid_profile)
+{
+ return false;
+}
+
+bool gl_lcms_has_profile(struct gl_lcms *p)
+{
+ return false;
+}
+
+bool gl_lcms_get_lut3d(struct gl_lcms *p, struct lut3d **result_lut3d,
+ enum mp_csp_prim prim, enum mp_csp_trc trc,
+ struct AVBufferRef *vid_profile)
+{
+ return false;
+}
+
+#endif
+
+static int validate_3dlut_size_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ int p1, p2, p3;
+ return gl_parse_3dlut_size(*value, &p1, &p2, &p3) ? 0 : M_OPT_INVALID;
+}
+
+#define OPT_BASE_STRUCT struct mp_icc_opts
+const struct m_sub_options mp_icc_conf = {
+ .opts = (const m_option_t[]) {
+ {"use-embedded-icc-profile", OPT_BOOL(use_embedded)},
+ {"icc-profile", OPT_STRING(profile), .flags = M_OPT_FILE},
+ {"icc-profile-auto", OPT_BOOL(profile_auto)},
+ {"icc-cache", OPT_BOOL(cache)},
+ {"icc-cache-dir", OPT_STRING(cache_dir), .flags = M_OPT_FILE},
+ {"icc-intent", OPT_INT(intent)},
+ {"icc-force-contrast", OPT_CHOICE(contrast, {"no", 0}, {"inf", -1}),
+ M_RANGE(0, 1000000)},
+ {"icc-3dlut-size", OPT_STRING_VALIDATE(size_str, validate_3dlut_size_opt)},
+ {"icc-use-luma", OPT_BOOL(icc_use_luma)},
+ {0}
+ },
+ .size = sizeof(struct mp_icc_opts),
+ .defaults = &(const struct mp_icc_opts) {
+ .size_str = "auto",
+ .intent = MP_INTENT_RELATIVE_COLORIMETRIC,
+ .use_embedded = true,
+ .cache = true,
+ },
+};
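A hedged sketch of the intended calling pattern for this module: per frame, check gl_lcms_has_changed() and only regenerate (and re-upload) the 3D LUT when it reports a change. 'p', 'prim', 'trc' and 'vid_profile' are assumed caller state.

    if (gl_lcms_has_profile(p) &&
        gl_lcms_has_changed(p, prim, trc, vid_profile))
    {
        struct lut3d *lut = NULL;
        if (gl_lcms_get_lut3d(p, &lut, prim, trc, vid_profile)) {
            /* lut->data is a packed 16-bit RGBA volume with lut->size[0..2]
             * samples per axis; upload it to a 3D texture, then: */
            talloc_free(lut);
        }
    }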
diff --git a/video/out/gpu/lcms.h b/video/out/gpu/lcms.h
new file mode 100644
index 0000000..607353a
--- /dev/null
+++ b/video/out/gpu/lcms.h
@@ -0,0 +1,61 @@
+#ifndef MP_GL_LCMS_H
+#define MP_GL_LCMS_H
+
+#include <stddef.h>
+#include <stdbool.h>
+#include "misc/bstr.h"
+#include "video/csputils.h"
+#include <libavutil/buffer.h>
+
+extern const struct m_sub_options mp_icc_conf;
+
+struct mp_icc_opts {
+ bool use_embedded;
+ char *profile;
+ bool profile_auto;
+ bool cache;
+ char *cache_dir;
+ char *size_str;
+ int intent;
+ int contrast;
+ bool icc_use_luma;
+};
+
+struct lut3d {
+ uint16_t *data;
+ int size[3];
+};
+
+struct mp_log;
+struct mpv_global;
+struct gl_lcms;
+
+struct gl_lcms *gl_lcms_init(void *talloc_ctx, struct mp_log *log,
+ struct mpv_global *global,
+ struct mp_icc_opts *opts);
+void gl_lcms_update_options(struct gl_lcms *p);
+bool gl_lcms_set_memory_profile(struct gl_lcms *p, bstr profile);
+bool gl_lcms_has_profile(struct gl_lcms *p);
+bool gl_lcms_get_lut3d(struct gl_lcms *p, struct lut3d **,
+ enum mp_csp_prim prim, enum mp_csp_trc trc,
+ struct AVBufferRef *vid_profile);
+bool gl_lcms_has_changed(struct gl_lcms *p, enum mp_csp_prim prim,
+ enum mp_csp_trc trc, struct AVBufferRef *vid_profile);
+
+static inline bool gl_parse_3dlut_size(const char *arg, int *p1, int *p2, int *p3)
+{
+ if (!strcmp(arg, "auto")) {
+ *p1 = *p2 = *p3 = 0;
+ return true;
+ }
+ if (sscanf(arg, "%dx%dx%d", p1, p2, p3) != 3)
+ return false;
+ for (int n = 0; n < 3; n++) {
+ int s = ((int[]) { *p1, *p2, *p3 })[n];
+ if (s < 2 || s > 512)
+ return false;
+ }
+ return true;
+}
+
+#endif
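For reference, a small illustration (a fragment, assuming <stdio.h>) of gl_parse_3dlut_size() semantics: "auto" yields zeros, meaning "pick a default size", while explicit sizes must be between 2 and 512 per axis.

    int r, g, b;
    if (gl_parse_3dlut_size("64x64x64", &r, &g, &b))
        printf("%dx%dx%d\n", r, g, b);      /* 64x64x64 */
    if (gl_parse_3dlut_size("auto", &r, &g, &b))
        printf("%d %d %d\n", r, g, b);      /* 0 0 0 -> caller picks default */
    if (!gl_parse_3dlut_size("1x64x64", &r, &g, &b))
        printf("rejected: axis below 2\n");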
diff --git a/video/out/gpu/libmpv_gpu.c b/video/out/gpu/libmpv_gpu.c
new file mode 100644
index 0000000..aae1d18
--- /dev/null
+++ b/video/out/gpu/libmpv_gpu.c
@@ -0,0 +1,248 @@
+#include "config.h"
+#include "hwdec.h"
+#include "libmpv_gpu.h"
+#include "libmpv/render_gl.h"
+#include "video.h"
+#include "video/out/libmpv.h"
+
+static const struct libmpv_gpu_context_fns *context_backends[] = {
+#if HAVE_GL
+ &libmpv_gpu_context_gl,
+#endif
+ NULL
+};
+
+struct priv {
+ struct libmpv_gpu_context *context;
+
+ struct gl_video *renderer;
+};
+
+struct native_resource_entry {
+ const char *name; // ra_add_native_resource() internal name argument
+ size_t size; // size of struct pointed to (0 for no copy)
+};
+
+static const struct native_resource_entry native_resource_map[] = {
+ [MPV_RENDER_PARAM_X11_DISPLAY] = {
+ .name = "x11",
+ .size = 0,
+ },
+ [MPV_RENDER_PARAM_WL_DISPLAY] = {
+ .name = "wl",
+ .size = 0,
+ },
+ [MPV_RENDER_PARAM_DRM_DRAW_SURFACE_SIZE] = {
+ .name = "drm_draw_surface_size",
+ .size = sizeof (mpv_opengl_drm_draw_surface_size),
+ },
+ [MPV_RENDER_PARAM_DRM_DISPLAY_V2] = {
+ .name = "drm_params_v2",
+ .size = sizeof (mpv_opengl_drm_params_v2),
+ },
+};
+
+static int init(struct render_backend *ctx, mpv_render_param *params)
+{
+ ctx->priv = talloc_zero(NULL, struct priv);
+ struct priv *p = ctx->priv;
+
+ char *api = get_mpv_render_param(params, MPV_RENDER_PARAM_API_TYPE, NULL);
+ if (!api)
+ return MPV_ERROR_INVALID_PARAMETER;
+
+ for (int n = 0; context_backends[n]; n++) {
+ const struct libmpv_gpu_context_fns *backend = context_backends[n];
+ if (strcmp(backend->api_name, api) == 0) {
+ p->context = talloc_zero(NULL, struct libmpv_gpu_context);
+ *p->context = (struct libmpv_gpu_context){
+ .global = ctx->global,
+ .log = ctx->log,
+ .fns = backend,
+ };
+ break;
+ }
+ }
+
+ if (!p->context)
+ return MPV_ERROR_NOT_IMPLEMENTED;
+
+ int err = p->context->fns->init(p->context, params);
+ if (err < 0)
+ return err;
+
+ for (int n = 0; params && params[n].type; n++) {
+ if (params[n].type > 0 &&
+ params[n].type < MP_ARRAY_SIZE(native_resource_map) &&
+ native_resource_map[params[n].type].name)
+ {
+ const struct native_resource_entry *entry =
+ &native_resource_map[params[n].type];
+ void *data = params[n].data;
+ if (entry->size)
+ data = talloc_memdup(p, data, entry->size);
+ ra_add_native_resource(p->context->ra_ctx->ra, entry->name, data);
+ }
+ }
+
+ p->renderer = gl_video_init(p->context->ra_ctx->ra, ctx->log, ctx->global);
+
+ ctx->hwdec_devs = hwdec_devices_create();
+ gl_video_init_hwdecs(p->renderer, p->context->ra_ctx, ctx->hwdec_devs, true);
+ ctx->driver_caps = VO_CAP_ROTATE90;
+ return 0;
+}
+
+static bool check_format(struct render_backend *ctx, int imgfmt)
+{
+ struct priv *p = ctx->priv;
+
+ return gl_video_check_format(p->renderer, imgfmt);
+}
+
+static int set_parameter(struct render_backend *ctx, mpv_render_param param)
+{
+ struct priv *p = ctx->priv;
+
+ switch (param.type) {
+ case MPV_RENDER_PARAM_ICC_PROFILE: {
+ mpv_byte_array *data = param.data;
+ gl_video_set_icc_profile(p->renderer, (bstr){data->data, data->size});
+ return 0;
+ }
+ case MPV_RENDER_PARAM_AMBIENT_LIGHT: {
+ int lux = *(int *)param.data;
+ gl_video_set_ambient_lux(p->renderer, lux);
+ return 0;
+ }
+ default:
+ return MPV_ERROR_NOT_IMPLEMENTED;
+ }
+}
+
+static void reconfig(struct render_backend *ctx, struct mp_image_params *params)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_config(p->renderer, params);
+}
+
+static void reset(struct render_backend *ctx)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_reset(p->renderer);
+}
+
+static void update_external(struct render_backend *ctx, struct vo *vo)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_set_osd_source(p->renderer, vo ? vo->osd : NULL);
+ if (vo)
+ gl_video_configure_queue(p->renderer, vo);
+}
+
+static void resize(struct render_backend *ctx, struct mp_rect *src,
+ struct mp_rect *dst, struct mp_osd_res *osd)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_resize(p->renderer, src, dst, osd);
+}
+
+static int get_target_size(struct render_backend *ctx, mpv_render_param *params,
+ int *out_w, int *out_h)
+{
+ struct priv *p = ctx->priv;
+
+ // Mapping the surface is cheap, better than adding new backend entrypoints.
+ struct ra_tex *tex;
+ int err = p->context->fns->wrap_fbo(p->context, params, &tex);
+ if (err < 0)
+ return err;
+ *out_w = tex->params.w;
+ *out_h = tex->params.h;
+ return 0;
+}
+
+static int render(struct render_backend *ctx, mpv_render_param *params,
+ struct vo_frame *frame)
+{
+ struct priv *p = ctx->priv;
+
+ // Mapping the surface is cheap, better than adding new backend entrypoints.
+ struct ra_tex *tex;
+ int err = p->context->fns->wrap_fbo(p->context, params, &tex);
+ if (err < 0)
+ return err;
+
+ int depth = *(int *)get_mpv_render_param(params, MPV_RENDER_PARAM_DEPTH,
+ &(int){0});
+ gl_video_set_fb_depth(p->renderer, depth);
+
+ bool flip = *(int *)get_mpv_render_param(params, MPV_RENDER_PARAM_FLIP_Y,
+ &(int){0});
+
+ struct ra_fbo target = {.tex = tex, .flip = flip};
+ gl_video_render_frame(p->renderer, frame, target, RENDER_FRAME_DEF);
+ p->context->fns->done_frame(p->context, frame->display_synced);
+
+ return 0;
+}
+
+static struct mp_image *get_image(struct render_backend *ctx, int imgfmt,
+ int w, int h, int stride_align, int flags)
+{
+ struct priv *p = ctx->priv;
+
+ return gl_video_get_image(p->renderer, imgfmt, w, h, stride_align, flags);
+}
+
+static void screenshot(struct render_backend *ctx, struct vo_frame *frame,
+ struct voctrl_screenshot *args)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_screenshot(p->renderer, frame, args);
+}
+
+static void perfdata(struct render_backend *ctx,
+ struct voctrl_performance_data *out)
+{
+ struct priv *p = ctx->priv;
+
+ gl_video_perfdata(p->renderer, out);
+}
+
+static void destroy(struct render_backend *ctx)
+{
+ struct priv *p = ctx->priv;
+
+ if (p->renderer)
+ gl_video_uninit(p->renderer);
+
+ hwdec_devices_destroy(ctx->hwdec_devs);
+
+ if (p->context) {
+ p->context->fns->destroy(p->context);
+ talloc_free(p->context->priv);
+ talloc_free(p->context);
+ }
+}
+
+const struct render_backend_fns render_backend_gpu = {
+ .init = init,
+ .check_format = check_format,
+ .set_parameter = set_parameter,
+ .reconfig = reconfig,
+ .reset = reset,
+ .update_external = update_external,
+ .resize = resize,
+ .get_target_size = get_target_size,
+ .render = render,
+ .get_image = get_image,
+ .screenshot = screenshot,
+ .perfdata = perfdata,
+ .destroy = destroy,
+};
diff --git a/video/out/gpu/libmpv_gpu.h b/video/out/gpu/libmpv_gpu.h
new file mode 100644
index 0000000..497dcc3
--- /dev/null
+++ b/video/out/gpu/libmpv_gpu.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#include "video/out/libmpv.h"
+
+struct ra_tex;
+
+struct libmpv_gpu_context {
+ struct mpv_global *global;
+ struct mp_log *log;
+ const struct libmpv_gpu_context_fns *fns;
+
+ struct ra_ctx *ra_ctx;
+ void *priv;
+};
+
+// Manage backend-specific interaction between libmpv and the ra backend that
+// can't be managed by ra itself (initialization and passing FBOs).
+struct libmpv_gpu_context_fns {
+ // The libmpv API type name, see MPV_RENDER_PARAM_API_TYPE.
+ const char *api_name;
+ // Pretty much works like render_backend_fns.init, except that the
+ // API type is already checked by the caller.
+    // Successful init must set ctx->ra_ctx.
+ int (*init)(struct libmpv_gpu_context *ctx, mpv_render_param *params);
+ // Wrap the surface passed to mpv_render_context_render() (via the params
+ // array) into a ra_tex and return it. Returns a libmpv error code, and sets
+ // *out to a temporary object on success. The returned object is valid until
+ // another wrap_fbo() or done_frame() is called.
+ // This does not need to care about generic attributes, like flipping.
+ int (*wrap_fbo)(struct libmpv_gpu_context *ctx, mpv_render_param *params,
+ struct ra_tex **out);
+ // Signal that the ra_tex object obtained with wrap_fbo is no longer used.
+ // For certain backends, this might also be used to signal the end of
+ // rendering (like OpenGL doing weird crap).
+ void (*done_frame)(struct libmpv_gpu_context *ctx, bool ds);
+ // Free all data in ctx->priv.
+ void (*destroy)(struct libmpv_gpu_context *ctx);
+};
+
+extern const struct libmpv_gpu_context_fns libmpv_gpu_context_gl;
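To make the contract concrete, a schematic skeleton of a backend implementing this interface (all names below are illustrative; the real OpenGL backend lives in a separate file):

    /* Illustrative skeleton only. */
    static int example_init(struct libmpv_gpu_context *ctx, mpv_render_param *params)
    {
        /* Create the underlying ra_ctx, keep backend state in ctx->priv,
         * and set ctx->ra_ctx on success. */
        return 0;  /* or a negative MPV_ERROR_* value */
    }

    static int example_wrap_fbo(struct libmpv_gpu_context *ctx,
                                mpv_render_param *params, struct ra_tex **out)
    {
        /* Wrap the target surface passed by the API user into a temporary
         * ra_tex and return it via *out. */
        *out = NULL;  /* placeholder */
        return 0;
    }

    static void example_done_frame(struct libmpv_gpu_context *ctx, bool ds) { }
    static void example_destroy(struct libmpv_gpu_context *ctx) { }

    const struct libmpv_gpu_context_fns libmpv_gpu_context_example = {
        .api_name   = "example",
        .init       = example_init,
        .wrap_fbo   = example_wrap_fbo,
        .done_frame = example_done_frame,
        .destroy    = example_destroy,
    };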
diff --git a/video/out/gpu/osd.c b/video/out/gpu/osd.c
new file mode 100644
index 0000000..91505a9
--- /dev/null
+++ b/video/out/gpu/osd.c
@@ -0,0 +1,363 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+#include <limits.h>
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "video/csputils.h"
+#include "video/mp_image.h"
+#include "osd.h"
+
+#define GLSL(x) gl_sc_add(sc, #x "\n");
+
+// glBlendFuncSeparate() arguments
+static const int blend_factors[SUBBITMAP_COUNT][4] = {
+ [SUBBITMAP_LIBASS] = {RA_BLEND_SRC_ALPHA, RA_BLEND_ONE_MINUS_SRC_ALPHA,
+ RA_BLEND_ONE, RA_BLEND_ONE_MINUS_SRC_ALPHA},
+ [SUBBITMAP_BGRA] = {RA_BLEND_ONE, RA_BLEND_ONE_MINUS_SRC_ALPHA,
+ RA_BLEND_ONE, RA_BLEND_ONE_MINUS_SRC_ALPHA},
+};
+
+struct vertex {
+ float position[2];
+ float texcoord[2];
+ uint8_t ass_color[4];
+};
+
+static const struct ra_renderpass_input vertex_vao[] = {
+ {"position", RA_VARTYPE_FLOAT, 2, 1, offsetof(struct vertex, position)},
+ {"texcoord" , RA_VARTYPE_FLOAT, 2, 1, offsetof(struct vertex, texcoord)},
+ {"ass_color", RA_VARTYPE_BYTE_UNORM, 4, 1, offsetof(struct vertex, ass_color)},
+};
+
+struct mpgl_osd_part {
+ enum sub_bitmap_format format;
+ int change_id;
+ struct ra_tex *texture;
+ int w, h;
+ int num_subparts;
+ int prev_num_subparts;
+ struct sub_bitmap *subparts;
+ int num_vertices;
+ struct vertex *vertices;
+};
+
+struct mpgl_osd {
+ struct mp_log *log;
+ struct osd_state *osd;
+ struct ra *ra;
+ struct mpgl_osd_part *parts[MAX_OSD_PARTS];
+ const struct ra_format *fmt_table[SUBBITMAP_COUNT];
+ bool formats[SUBBITMAP_COUNT];
+ bool change_flag; // for reporting to API user only
+ // temporary
+ int stereo_mode;
+ struct mp_osd_res osd_res;
+ void *scratch;
+};
+
+struct mpgl_osd *mpgl_osd_init(struct ra *ra, struct mp_log *log,
+ struct osd_state *osd)
+{
+ struct mpgl_osd *ctx = talloc_ptrtype(NULL, ctx);
+ *ctx = (struct mpgl_osd) {
+ .log = log,
+ .osd = osd,
+ .ra = ra,
+ .change_flag = true,
+ .scratch = talloc_zero_size(ctx, 1),
+ };
+
+ ctx->fmt_table[SUBBITMAP_LIBASS] = ra_find_unorm_format(ra, 1, 1);
+ ctx->fmt_table[SUBBITMAP_BGRA] = ra_find_unorm_format(ra, 1, 4);
+
+ for (int n = 0; n < MAX_OSD_PARTS; n++)
+ ctx->parts[n] = talloc_zero(ctx, struct mpgl_osd_part);
+
+ for (int n = 0; n < SUBBITMAP_COUNT; n++)
+ ctx->formats[n] = !!ctx->fmt_table[n];
+
+ return ctx;
+}
+
+void mpgl_osd_destroy(struct mpgl_osd *ctx)
+{
+ if (!ctx)
+ return;
+
+ for (int n = 0; n < MAX_OSD_PARTS; n++) {
+ struct mpgl_osd_part *p = ctx->parts[n];
+ ra_tex_free(ctx->ra, &p->texture);
+ }
+ talloc_free(ctx);
+}
+
+static int next_pow2(int v)
+{
+ for (int x = 0; x < 30; x++) {
+ if ((1 << x) >= v)
+ return 1 << x;
+ }
+ return INT_MAX;
+}
+
+static bool upload_osd(struct mpgl_osd *ctx, struct mpgl_osd_part *osd,
+ struct sub_bitmaps *imgs)
+{
+ struct ra *ra = ctx->ra;
+ bool ok = false;
+
+ assert(imgs->packed);
+
+ int req_w = next_pow2(imgs->packed_w);
+ int req_h = next_pow2(imgs->packed_h);
+
+ const struct ra_format *fmt = ctx->fmt_table[imgs->format];
+ assert(fmt);
+
+ if (!osd->texture || req_w > osd->w || req_h > osd->h ||
+ osd->format != imgs->format)
+ {
+ ra_tex_free(ra, &osd->texture);
+
+ osd->format = imgs->format;
+ osd->w = MPMAX(32, req_w);
+ osd->h = MPMAX(32, req_h);
+
+ MP_VERBOSE(ctx, "Reallocating OSD texture to %dx%d.\n", osd->w, osd->h);
+
+ if (osd->w > ra->max_texture_wh || osd->h > ra->max_texture_wh) {
+ MP_ERR(ctx, "OSD bitmaps do not fit on a surface with the maximum "
+ "supported size %dx%d.\n", ra->max_texture_wh,
+ ra->max_texture_wh);
+ goto done;
+ }
+
+ struct ra_tex_params params = {
+ .dimensions = 2,
+ .w = osd->w,
+ .h = osd->h,
+ .d = 1,
+ .format = fmt,
+ .render_src = true,
+ .src_linear = true,
+ .host_mutable = true,
+ };
+ osd->texture = ra_tex_create(ra, &params);
+ if (!osd->texture)
+ goto done;
+ }
+
+ struct ra_tex_upload_params params = {
+ .tex = osd->texture,
+ .src = imgs->packed->planes[0],
+ .invalidate = true,
+ .rc = &(struct mp_rect){0, 0, imgs->packed_w, imgs->packed_h},
+ .stride = imgs->packed->stride[0],
+ };
+
+ ok = ra->fns->tex_upload(ra, &params);
+
+done:
+ return ok;
+}
+
+static void gen_osd_cb(void *pctx, struct sub_bitmaps *imgs)
+{
+ struct mpgl_osd *ctx = pctx;
+
+ if (imgs->num_parts == 0 || !ctx->formats[imgs->format])
+ return;
+
+ struct mpgl_osd_part *osd = ctx->parts[imgs->render_index];
+
+ bool ok = true;
+ if (imgs->change_id != osd->change_id) {
+ if (!upload_osd(ctx, osd, imgs))
+ ok = false;
+
+ osd->change_id = imgs->change_id;
+ ctx->change_flag = true;
+ }
+ osd->num_subparts = ok ? imgs->num_parts : 0;
+
+ MP_TARRAY_GROW(osd, osd->subparts, osd->num_subparts);
+ memcpy(osd->subparts, imgs->parts,
+ osd->num_subparts * sizeof(osd->subparts[0]));
+}
+
+bool mpgl_osd_draw_prepare(struct mpgl_osd *ctx, int index,
+ struct gl_shader_cache *sc)
+{
+ assert(index >= 0 && index < MAX_OSD_PARTS);
+ struct mpgl_osd_part *part = ctx->parts[index];
+
+ enum sub_bitmap_format fmt = part->format;
+ if (!fmt || !part->num_subparts || !part->texture)
+ return false;
+
+ gl_sc_uniform_texture(sc, "osdtex", part->texture);
+ switch (fmt) {
+ case SUBBITMAP_BGRA: {
+ GLSL(color = texture(osdtex, texcoord).bgra;)
+ break;
+ }
+ case SUBBITMAP_LIBASS: {
+ GLSL(color =
+ vec4(ass_color.rgb, ass_color.a * texture(osdtex, texcoord).r);)
+ break;
+ }
+ default:
+ MP_ASSERT_UNREACHABLE();
+ }
+
+ return true;
+}
+
+static void write_quad(struct vertex *va, struct gl_transform t,
+ float x0, float y0, float x1, float y1,
+ float tx0, float ty0, float tx1, float ty1,
+ float tex_w, float tex_h, const uint8_t color[4])
+{
+ gl_transform_vec(t, &x0, &y0);
+ gl_transform_vec(t, &x1, &y1);
+
+#define COLOR_INIT {color[0], color[1], color[2], color[3]}
+ va[0] = (struct vertex){ {x0, y0}, {tx0 / tex_w, ty0 / tex_h}, COLOR_INIT };
+ va[1] = (struct vertex){ {x0, y1}, {tx0 / tex_w, ty1 / tex_h}, COLOR_INIT };
+ va[2] = (struct vertex){ {x1, y0}, {tx1 / tex_w, ty0 / tex_h}, COLOR_INIT };
+ va[3] = (struct vertex){ {x1, y1}, {tx1 / tex_w, ty1 / tex_h}, COLOR_INIT };
+ va[4] = va[2];
+ va[5] = va[1];
+#undef COLOR_INIT
+}
+
+static void generate_verts(struct mpgl_osd_part *part, struct gl_transform t)
+{
+ MP_TARRAY_GROW(part, part->vertices,
+ part->num_vertices + part->num_subparts * 6);
+
+ for (int n = 0; n < part->num_subparts; n++) {
+ struct sub_bitmap *b = &part->subparts[n];
+ struct vertex *va = &part->vertices[part->num_vertices];
+
+ // NOTE: the blend color is used with SUBBITMAP_LIBASS only, so it
+ // doesn't matter that we upload garbage for the other formats
+ uint32_t c = b->libass.color;
+ uint8_t color[4] = { c >> 24, (c >> 16) & 0xff,
+ (c >> 8) & 0xff, 255 - (c & 0xff) };
+
+ write_quad(va, t,
+ b->x, b->y, b->x + b->dw, b->y + b->dh,
+ b->src_x, b->src_y, b->src_x + b->w, b->src_y + b->h,
+ part->w, part->h, color);
+
+ part->num_vertices += 6;
+ }
+}
+
+// number of screen divisions per axis (x=0, y=1) for the current 3D mode
+static void get_3d_side_by_side(int stereo_mode, int div[2])
+{
+ div[0] = div[1] = 1;
+ switch (stereo_mode) {
+ case MP_STEREO3D_SBS2L:
+ case MP_STEREO3D_SBS2R: div[0] = 2; break;
+ case MP_STEREO3D_AB2R:
+ case MP_STEREO3D_AB2L: div[1] = 2; break;
+ }
+}
+
+void mpgl_osd_draw_finish(struct mpgl_osd *ctx, int index,
+ struct gl_shader_cache *sc, struct ra_fbo fbo)
+{
+ struct mpgl_osd_part *part = ctx->parts[index];
+
+ int div[2];
+ get_3d_side_by_side(ctx->stereo_mode, div);
+
+ part->num_vertices = 0;
+
+ for (int x = 0; x < div[0]; x++) {
+ for (int y = 0; y < div[1]; y++) {
+ struct gl_transform t;
+ gl_transform_ortho_fbo(&t, fbo);
+
+ float a_x = ctx->osd_res.w * x;
+ float a_y = ctx->osd_res.h * y;
+ t.t[0] += a_x * t.m[0][0] + a_y * t.m[1][0];
+ t.t[1] += a_x * t.m[0][1] + a_y * t.m[1][1];
+
+ generate_verts(part, t);
+ }
+ }
+
+ const int *factors = &blend_factors[part->format][0];
+ gl_sc_blend(sc, factors[0], factors[1], factors[2], factors[3]);
+
+ gl_sc_dispatch_draw(sc, fbo.tex, false, vertex_vao, MP_ARRAY_SIZE(vertex_vao),
+ sizeof(struct vertex), part->vertices, part->num_vertices);
+}
+
+static void set_res(struct mpgl_osd *ctx, struct mp_osd_res res, int stereo_mode)
+{
+ int div[2];
+ get_3d_side_by_side(stereo_mode, div);
+
+ res.w /= div[0];
+ res.h /= div[1];
+ ctx->osd_res = res;
+}
+
+void mpgl_osd_generate(struct mpgl_osd *ctx, struct mp_osd_res res, double pts,
+ int stereo_mode, int draw_flags)
+{
+ for (int n = 0; n < MAX_OSD_PARTS; n++)
+ ctx->parts[n]->num_subparts = 0;
+
+ set_res(ctx, res, stereo_mode);
+
+ osd_draw(ctx->osd, ctx->osd_res, pts, draw_flags, ctx->formats, gen_osd_cb, ctx);
+ ctx->stereo_mode = stereo_mode;
+
+ // Parts going away does not necessarily result in gen_osd_cb() being called
+ // (not even with num_parts==0), so check this separately.
+ for (int n = 0; n < MAX_OSD_PARTS; n++) {
+ struct mpgl_osd_part *part = ctx->parts[n];
+ if (part->num_subparts != part->prev_num_subparts)
+ ctx->change_flag = true;
+ part->prev_num_subparts = part->num_subparts;
+ }
+}
+
+// See osd_resize() for remarks. This function is an optional optimization too.
+void mpgl_osd_resize(struct mpgl_osd *ctx, struct mp_osd_res res, int stereo_mode)
+{
+ set_res(ctx, res, stereo_mode);
+ osd_resize(ctx->osd, ctx->osd_res);
+}
+
+bool mpgl_osd_check_change(struct mpgl_osd *ctx, struct mp_osd_res *res,
+ double pts)
+{
+ ctx->change_flag = false;
+ mpgl_osd_generate(ctx, *res, pts, 0, 0);
+ return ctx->change_flag;
+}
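As an aside, a standalone illustration (not part of this patch) of the libass color unpacking performed in generate_verts(): the packed value is 0xRRGGBBAA, where the low byte is transparency (0 = opaque), hence the 255 - (c & 0xff) when filling the vertex alpha.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t c = 0x40E0D020;  /* example packed libass color */
        uint8_t color[4] = { c >> 24, (c >> 16) & 0xff,
                             (c >> 8) & 0xff, 255 - (c & 0xff) };
        /* Prints r=64 g=224 b=208 a=223. */
        printf("r=%d g=%d b=%d a=%d\n", color[0], color[1], color[2], color[3]);
        return 0;
    }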
diff --git a/video/out/gpu/osd.h b/video/out/gpu/osd.h
new file mode 100644
index 0000000..00fbc49
--- /dev/null
+++ b/video/out/gpu/osd.h
@@ -0,0 +1,25 @@
+#ifndef MPLAYER_GL_OSD_H
+#define MPLAYER_GL_OSD_H
+
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "utils.h"
+#include "shader_cache.h"
+#include "sub/osd.h"
+
+struct mpgl_osd *mpgl_osd_init(struct ra *ra, struct mp_log *log,
+ struct osd_state *osd);
+void mpgl_osd_destroy(struct mpgl_osd *ctx);
+
+void mpgl_osd_generate(struct mpgl_osd *ctx, struct mp_osd_res res, double pts,
+ int stereo_mode, int draw_flags);
+void mpgl_osd_resize(struct mpgl_osd *ctx, struct mp_osd_res res, int stereo_mode);
+bool mpgl_osd_draw_prepare(struct mpgl_osd *ctx, int index,
+ struct gl_shader_cache *sc);
+void mpgl_osd_draw_finish(struct mpgl_osd *ctx, int index,
+ struct gl_shader_cache *sc, struct ra_fbo fbo);
+bool mpgl_osd_check_change(struct mpgl_osd *ctx, struct mp_osd_res *res,
+ double pts);
+
+#endif
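A hedged sketch of the per-frame draw loop this header implies; 'osd', 'sc', 'fbo', 'res' and 'pts' are assumed caller state, and any extra shader code would be emitted into sc between prepare and finish.

    mpgl_osd_generate(osd, res, pts, /*stereo_mode=*/0, /*draw_flags=*/0);
    for (int n = 0; n < MAX_OSD_PARTS; n++) {
        if (!mpgl_osd_draw_prepare(osd, n, sc))
            continue;
        /* ... append any further shader snippets to sc here ... */
        mpgl_osd_draw_finish(osd, n, sc, fbo);
    }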
diff --git a/video/out/gpu/ra.c b/video/out/gpu/ra.c
new file mode 100644
index 0000000..855f9b6
--- /dev/null
+++ b/video/out/gpu/ra.c
@@ -0,0 +1,424 @@
+#include "common/common.h"
+#include "common/msg.h"
+#include "video/img_format.h"
+
+#include "ra.h"
+
+void ra_add_native_resource(struct ra *ra, const char *name, void *data)
+{
+ struct ra_native_resource r = {
+ .name = name,
+ .data = data,
+ };
+ MP_TARRAY_APPEND(ra, ra->native_resources, ra->num_native_resources, r);
+}
+
+void *ra_get_native_resource(struct ra *ra, const char *name)
+{
+ for (int n = 0; n < ra->num_native_resources; n++) {
+ struct ra_native_resource *r = &ra->native_resources[n];
+ if (strcmp(r->name, name) == 0)
+ return r->data;
+ }
+
+ return NULL;
+}
+
+struct ra_tex *ra_tex_create(struct ra *ra, const struct ra_tex_params *params)
+{
+ switch (params->dimensions) {
+ case 1:
+ assert(params->h == 1 && params->d == 1);
+ break;
+ case 2:
+ assert(params->d == 1);
+ break;
+ default:
+ assert(params->dimensions >= 1 && params->dimensions <= 3);
+ }
+ return ra->fns->tex_create(ra, params);
+}
+
+void ra_tex_free(struct ra *ra, struct ra_tex **tex)
+{
+ if (*tex)
+ ra->fns->tex_destroy(ra, *tex);
+ *tex = NULL;
+}
+
+struct ra_buf *ra_buf_create(struct ra *ra, const struct ra_buf_params *params)
+{
+ return ra->fns->buf_create(ra, params);
+}
+
+void ra_buf_free(struct ra *ra, struct ra_buf **buf)
+{
+ if (*buf)
+ ra->fns->buf_destroy(ra, *buf);
+ *buf = NULL;
+}
+
+void ra_free(struct ra **ra)
+{
+ if (*ra)
+ (*ra)->fns->destroy(*ra);
+ talloc_free(*ra);
+ *ra = NULL;
+}
+
+size_t ra_vartype_size(enum ra_vartype type)
+{
+ switch (type) {
+ case RA_VARTYPE_INT: return sizeof(int);
+ case RA_VARTYPE_FLOAT: return sizeof(float);
+ case RA_VARTYPE_BYTE_UNORM: return 1;
+ default: return 0;
+ }
+}
+
+struct ra_layout ra_renderpass_input_layout(struct ra_renderpass_input *input)
+{
+ size_t el_size = ra_vartype_size(input->type);
+ if (!el_size)
+ return (struct ra_layout){0};
+
+ // host data is always tightly packed
+ return (struct ra_layout) {
+ .align = 1,
+ .stride = el_size * input->dim_v,
+ .size = el_size * input->dim_v * input->dim_m,
+ };
+}
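+
+// Illustrative example (not part of the mpv sources): for a RA_VARTYPE_FLOAT
+// input with dim_v = 3 and dim_m = 1 (a GLSL vec3), the host layout above is
+// align = 1, stride = 12, size = 12 bytes; a mat2 (dim_v = dim_m = 2) yields
+// stride = 8 and size = 16 bytes.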
+
+static struct ra_renderpass_input *dup_inputs(void *ta_parent,
+ const struct ra_renderpass_input *inputs, int num_inputs)
+{
+ struct ra_renderpass_input *res =
+ talloc_memdup(ta_parent, (void *)inputs, num_inputs * sizeof(inputs[0]));
+ for (int n = 0; n < num_inputs; n++)
+ res[n].name = talloc_strdup(res, res[n].name);
+ return res;
+}
+
+// Return a newly allocated deep-copy of params.
+struct ra_renderpass_params *ra_renderpass_params_copy(void *ta_parent,
+ const struct ra_renderpass_params *params)
+{
+ struct ra_renderpass_params *res = talloc_ptrtype(ta_parent, res);
+ *res = *params;
+ res->inputs = dup_inputs(res, res->inputs, res->num_inputs);
+ res->vertex_attribs =
+ dup_inputs(res, res->vertex_attribs, res->num_vertex_attribs);
+ res->cached_program = bstrdup(res, res->cached_program);
+ res->vertex_shader = talloc_strdup(res, res->vertex_shader);
+ res->frag_shader = talloc_strdup(res, res->frag_shader);
+ res->compute_shader = talloc_strdup(res, res->compute_shader);
+ return res;
+}
+
+struct glsl_fmt {
+ enum ra_ctype ctype;
+ int num_components;
+ int component_depth[4];
+ const char *glsl_format;
+};
+
+// List taken from the GLSL specification, sans snorm and sint formats
+static const struct glsl_fmt ra_glsl_fmts[] = {
+ {RA_CTYPE_FLOAT, 1, {16}, "r16f"},
+ {RA_CTYPE_FLOAT, 1, {32}, "r32f"},
+ {RA_CTYPE_FLOAT, 2, {16, 16}, "rg16f"},
+ {RA_CTYPE_FLOAT, 2, {32, 32}, "rg32f"},
+ {RA_CTYPE_FLOAT, 4, {16, 16, 16, 16}, "rgba16f"},
+ {RA_CTYPE_FLOAT, 4, {32, 32, 32, 32}, "rgba32f"},
+ {RA_CTYPE_FLOAT, 3, {11, 11, 10}, "r11f_g11f_b10f"},
+
+ {RA_CTYPE_UNORM, 1, {8}, "r8"},
+ {RA_CTYPE_UNORM, 1, {16}, "r16"},
+ {RA_CTYPE_UNORM, 2, {8, 8}, "rg8"},
+ {RA_CTYPE_UNORM, 2, {16, 16}, "rg16"},
+ {RA_CTYPE_UNORM, 4, {8, 8, 8, 8}, "rgba8"},
+ {RA_CTYPE_UNORM, 4, {16, 16, 16, 16}, "rgba16"},
+ {RA_CTYPE_UNORM, 4, {10, 10, 10, 2}, "rgb10_a2"},
+
+ {RA_CTYPE_UINT, 1, {8}, "r8ui"},
+ {RA_CTYPE_UINT, 1, {16}, "r16ui"},
+ {RA_CTYPE_UINT, 1, {32}, "r32ui"},
+ {RA_CTYPE_UINT, 2, {8, 8}, "rg8ui"},
+ {RA_CTYPE_UINT, 2, {16, 16}, "rg16ui"},
+ {RA_CTYPE_UINT, 2, {32, 32}, "rg32ui"},
+ {RA_CTYPE_UINT, 4, {8, 8, 8, 8}, "rgba8ui"},
+ {RA_CTYPE_UINT, 4, {16, 16, 16, 16}, "rgba16ui"},
+ {RA_CTYPE_UINT, 4, {32, 32, 32, 32}, "rgba32ui"},
+ {RA_CTYPE_UINT, 4, {10, 10, 10, 2}, "rgb10_a2ui"},
+};
+
+const char *ra_fmt_glsl_format(const struct ra_format *fmt)
+{
+ for (int n = 0; n < MP_ARRAY_SIZE(ra_glsl_fmts); n++) {
+ const struct glsl_fmt *gfmt = &ra_glsl_fmts[n];
+
+ if (fmt->ctype != gfmt->ctype)
+ continue;
+ if (fmt->num_components != gfmt->num_components)
+ continue;
+
+ for (int i = 0; i < fmt->num_components; i++) {
+ if (fmt->component_depth[i] != gfmt->component_depth[i])
+ goto next_fmt;
+ }
+
+ return gfmt->glsl_format;
+
+next_fmt: ; // equivalent to `continue`
+ }
+
+ return NULL;
+}
+
+// Return whether this is a tightly packed format with no external padding and
+// with the same bit size/depth in all components, and the shader returns
+// components in the same order as in memory.
+static bool ra_format_is_regular(const struct ra_format *fmt)
+{
+ if (!fmt->pixel_size || !fmt->num_components || !fmt->ordered)
+ return false;
+ for (int n = 1; n < fmt->num_components; n++) {
+ if (fmt->component_size[n] != fmt->component_size[0] ||
+ fmt->component_depth[n] != fmt->component_depth[0])
+ return false;
+ }
+ if (fmt->component_size[0] * fmt->num_components != fmt->pixel_size * 8)
+ return false;
+ return true;
+}
+
+// Return a regular filterable format using RA_CTYPE_UNORM.
+const struct ra_format *ra_find_unorm_format(struct ra *ra,
+ int bytes_per_component,
+ int n_components)
+{
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ if (fmt->ctype == RA_CTYPE_UNORM && fmt->num_components == n_components &&
+ fmt->pixel_size == bytes_per_component * n_components &&
+ fmt->component_depth[0] == bytes_per_component * 8 &&
+ fmt->linear_filter && ra_format_is_regular(fmt))
+ return fmt;
+ }
+ return NULL;
+}
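+
+// Illustrative example (not part of the mpv sources): assuming the backend
+// exposes the common 8/16-bit normalized formats, ra_find_unorm_format(ra, 1, 4)
+// typically returns the "rgba8" format, and ra_find_unorm_format(ra, 2, 1) the
+// "r16" format.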
+
+// Return a regular format using RA_CTYPE_UINT.
+const struct ra_format *ra_find_uint_format(struct ra *ra,
+ int bytes_per_component,
+ int n_components)
+{
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ if (fmt->ctype == RA_CTYPE_UINT && fmt->num_components == n_components &&
+ fmt->pixel_size == bytes_per_component * n_components &&
+ fmt->component_depth[0] == bytes_per_component * 8 &&
+ ra_format_is_regular(fmt))
+ return fmt;
+ }
+ return NULL;
+}
+
+// Find a float format of any precision that matches the C type of the same
+// size for upload.
+// May drop bits from the mantissa (such as selecting a float16 texture even
+// if bytes_per_component == 4, i.e. 32-bit floats); prefers possibly faster
+// formats first.
+static const struct ra_format *ra_find_float_format(struct ra *ra,
+ int bytes_per_component,
+ int n_components)
+{
+ // Assumes ra_format are ordered by performance.
+ // The >=16 check is to avoid catching fringe formats.
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ if (fmt->ctype == RA_CTYPE_FLOAT && fmt->num_components == n_components &&
+ fmt->pixel_size == bytes_per_component * n_components &&
+ fmt->component_depth[0] >= 16 &&
+ fmt->linear_filter && ra_format_is_regular(fmt))
+ return fmt;
+ }
+ return NULL;
+}
+
+// Return a filterable regular format that uses at least float16 internally, and
+// uses a normal C float for transfer on the CPU side. (This is just so we don't
+// need 32->16 bit conversion on CPU, which would be messy.)
+const struct ra_format *ra_find_float16_format(struct ra *ra, int n_components)
+{
+ return ra_find_float_format(ra, sizeof(float), n_components);
+}
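+
+// Illustrative note (not part of the mpv sources): on a GL backend this may
+// return a format that is stored as 16-bit floats on the GPU but uploaded from
+// 32-bit C floats, or a full 32-bit float format, whichever suitable format
+// the RA lists first.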
+
+const struct ra_format *ra_find_named_format(struct ra *ra, const char *name)
+{
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ if (strcmp(fmt->name, name) == 0)
+ return fmt;
+ }
+ return NULL;
+}
+
+// Like ra_find_unorm_format(), but if no fixed point format is available,
+// return an unsigned integer format.
+static const struct ra_format *find_plane_format(struct ra *ra, int bytes,
+ int n_channels,
+ enum mp_component_type ctype)
+{
+ switch (ctype) {
+ case MP_COMPONENT_TYPE_UINT: {
+ const struct ra_format *f = ra_find_unorm_format(ra, bytes, n_channels);
+ if (f)
+ return f;
+ return ra_find_uint_format(ra, bytes, n_channels);
+ }
+ case MP_COMPONENT_TYPE_FLOAT:
+ return ra_find_float_format(ra, bytes, n_channels);
+ default: return NULL;
+ }
+}
+
+// Put a mapping of imgfmt to texture formats into *out. Basically it selects
+// the correct texture formats needed to represent an imgfmt in a shader, with
+// textures using the same memory organization as on the CPU.
+// Each plane is represented by a texture, and each texture has a RGBA
+// component order. out->components describes the meaning of them.
+// May return integer formats for >8 bit formats, if the driver has no
+// normalized 16 bit formats.
+// Returns false (and *out is not touched) if no format found.
+bool ra_get_imgfmt_desc(struct ra *ra, int imgfmt, struct ra_imgfmt_desc *out)
+{
+ struct ra_imgfmt_desc res = {.component_type = RA_CTYPE_UNKNOWN};
+
+ struct mp_regular_imgfmt regfmt;
+ if (mp_get_regular_imgfmt(&regfmt, imgfmt)) {
+ res.num_planes = regfmt.num_planes;
+ res.component_bits = regfmt.component_size * 8;
+ res.component_pad = regfmt.component_pad;
+ for (int n = 0; n < regfmt.num_planes; n++) {
+ struct mp_regular_imgfmt_plane *plane = &regfmt.planes[n];
+ res.planes[n] = find_plane_format(ra, regfmt.component_size,
+ plane->num_components,
+ regfmt.component_type);
+ if (!res.planes[n])
+ return false;
+ for (int i = 0; i < plane->num_components; i++)
+ res.components[n][i] = plane->components[i];
+ // Dropping LSBs when shifting will lead to dropped MSBs.
+ if (res.component_bits > res.planes[n]->component_depth[0] &&
+ res.component_pad < 0)
+ return false;
+ // Renderer restriction, but actually an unwanted corner case.
+ if (res.component_type != RA_CTYPE_UNKNOWN &&
+ res.component_type != res.planes[n]->ctype)
+ return false;
+ res.component_type = res.planes[n]->ctype;
+ }
+ res.chroma_w = 1 << regfmt.chroma_xs;
+ res.chroma_h = 1 << regfmt.chroma_ys;
+ goto supported;
+ }
+
+ for (int n = 0; n < ra->num_formats; n++) {
+ if (imgfmt && ra->formats[n]->special_imgfmt == imgfmt) {
+ res = *ra->formats[n]->special_imgfmt_desc;
+ goto supported;
+ }
+ }
+
+ // Unsupported format
+ return false;
+
+supported:
+
+ *out = res;
+ return true;
+}
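+
+// Illustrative example (not part of the mpv sources): for NV12, assuming the
+// backend provides "r8" and "rg8" formats, this typically yields two planes
+// (r8 for Y, rg8 for interleaved CbCr), chroma_w = chroma_h = 2,
+// component_bits = 8 and component_type = RA_CTYPE_UNORM.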
+
+static const char *ctype_to_str(enum ra_ctype ctype)
+{
+ switch (ctype) {
+ case RA_CTYPE_UNORM: return "unorm";
+ case RA_CTYPE_UINT: return "uint ";
+ case RA_CTYPE_FLOAT: return "float";
+ default: return "unknown";
+ }
+}
+
+void ra_dump_tex_formats(struct ra *ra, int msgl)
+{
+ if (!mp_msg_test(ra->log, msgl))
+ return;
+ MP_MSG(ra, msgl, "Texture formats:\n");
+ MP_MSG(ra, msgl, " NAME COMP*TYPE SIZE DEPTH PER COMP.\n");
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ const char *ctype = ctype_to_str(fmt->ctype);
+ char cl[40] = "";
+ for (int i = 0; i < fmt->num_components; i++) {
+ mp_snprintf_cat(cl, sizeof(cl), "%s%d", i ? " " : "",
+ fmt->component_size[i]);
+ if (fmt->component_size[i] != fmt->component_depth[i])
+ mp_snprintf_cat(cl, sizeof(cl), "/%d", fmt->component_depth[i]);
+ }
+ MP_MSG(ra, msgl, " %-10s %d*%s %3dB %s %s %s %s {%s}\n", fmt->name,
+ fmt->num_components, ctype, fmt->pixel_size,
+ fmt->luminance_alpha ? "LA" : " ",
+ fmt->linear_filter ? "LF" : " ",
+ fmt->renderable ? "CR" : " ",
+ fmt->storable ? "ST" : " ", cl);
+ }
+ MP_MSG(ra, msgl, " LA = LUMINANCE_ALPHA hack format\n");
+ MP_MSG(ra, msgl, " LF = linear filterable\n");
+ MP_MSG(ra, msgl, " CR = can be used for render targets\n");
+ MP_MSG(ra, msgl, " ST = can be used for storable images\n");
+}
+
+void ra_dump_imgfmt_desc(struct ra *ra, const struct ra_imgfmt_desc *desc,
+ int msgl)
+{
+ char pl[80] = "";
+ char pf[80] = "";
+ for (int n = 0; n < desc->num_planes; n++) {
+ if (n > 0) {
+ mp_snprintf_cat(pl, sizeof(pl), "/");
+ mp_snprintf_cat(pf, sizeof(pf), "/");
+ }
+ char t[5] = {0};
+ for (int i = 0; i < 4; i++)
+ t[i] = "_rgba"[desc->components[n][i]];
+ for (int i = 3; i > 0 && t[i] == '_'; i--)
+ t[i] = '\0';
+ mp_snprintf_cat(pl, sizeof(pl), "%s", t);
+ mp_snprintf_cat(pf, sizeof(pf), "%s", desc->planes[n]->name);
+ }
+ MP_MSG(ra, msgl, "%d planes %dx%d %d/%d [%s] (%s) [%s]\n",
+ desc->num_planes, desc->chroma_w, desc->chroma_h,
+ desc->component_bits, desc->component_pad, pf, pl,
+ ctype_to_str(desc->component_type));
+}
+
+void ra_dump_img_formats(struct ra *ra, int msgl)
+{
+ if (!mp_msg_test(ra->log, msgl))
+ return;
+ MP_MSG(ra, msgl, "Image formats:\n");
+ for (int imgfmt = IMGFMT_START; imgfmt < IMGFMT_END; imgfmt++) {
+ const char *name = mp_imgfmt_to_name(imgfmt);
+ if (strcmp(name, "unknown") == 0)
+ continue;
+ MP_MSG(ra, msgl, " %s", name);
+ struct ra_imgfmt_desc desc;
+ if (ra_get_imgfmt_desc(ra, imgfmt, &desc)) {
+ MP_MSG(ra, msgl, " => ");
+ ra_dump_imgfmt_desc(ra, &desc, msgl);
+ } else {
+ MP_MSG(ra, msgl, "\n");
+ }
+ }
+}
diff --git a/video/out/gpu/ra.h b/video/out/gpu/ra.h
new file mode 100644
index 0000000..5f229f8
--- /dev/null
+++ b/video/out/gpu/ra.h
@@ -0,0 +1,559 @@
+#pragma once
+
+#include "common/common.h"
+#include "misc/bstr.h"
+
+// Handle for a rendering API backend.
+struct ra {
+ struct ra_fns *fns;
+ void *priv;
+
+ int glsl_version; // GLSL version (e.g. 300 => 3.0)
+ bool glsl_es; // use ES dialect
+ bool glsl_vulkan; // use vulkan dialect
+
+ struct mp_log *log;
+
+ // RA_CAP_* bit field. The RA backend must set supported features at init
+ // time.
+ uint64_t caps;
+
+ // Maximum supported width and height of a 2D texture. Set by the RA backend
+ // at init time.
+ int max_texture_wh;
+
+ // Maximum shared memory for compute shaders. Set by the RA backend at init
+ // time.
+ size_t max_shmem;
+
+ // Maximum number of threads in a compute work group. Set by the RA backend
+ // at init time.
+ size_t max_compute_group_threads;
+
+ // Maximum push constant size. Set by the RA backend at init time.
+ size_t max_pushc_size;
+
+ // Set of supported texture formats. Must be added by RA backend at init time.
+ // If there are equivalent formats with different caveats, the preferred
+ // formats should have a lower index. (E.g. GLES3 should put rg8 before la.)
+ struct ra_format **formats;
+ int num_formats;
+
+ // Accelerate texture uploads via an extra PBO even when
+ // RA_CAP_DIRECT_UPLOAD is supported. This is basically only relevant for
+ // OpenGL. Set by the RA user.
+ bool use_pbo;
+
+ // Array of native resources. For the most part an "escape" mechanism, and
+ // usually does not contain parameters required for basic functionality.
+ struct ra_native_resource *native_resources;
+ int num_native_resources;
+};
+
+// For passing through windowing system specific parameters and such. The
+// names are always internal (the libmpv render API uses mpv_render_param_type
+// and maps them to names internally).
+// For example, a name="x11" entry has a X11 display as (Display*)data.
+struct ra_native_resource {
+ const char *name;
+ void *data;
+};
+
+// Add a ra_native_resource entry. Both name and data pointers must stay valid
+// until ra termination.
+void ra_add_native_resource(struct ra *ra, const char *name, void *data);
+
+// Search ra->native_resources, returns NULL on failure.
+void *ra_get_native_resource(struct ra *ra, const char *name);
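+
+// Illustrative usage sketch (not part of the mpv sources), following the
+// name="x11" example above:
+//   ra_add_native_resource(ra, "x11", display);        // done by the backend
+//   Display *disp = ra_get_native_resource(ra, "x11"); // done by a consumer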
+
+enum {
+ RA_CAP_TEX_1D = 1 << 0, // supports 1D textures (as shader inputs)
+ RA_CAP_TEX_3D = 1 << 1, // supports 3D textures (as shader inputs)
+ RA_CAP_BLIT = 1 << 2, // supports ra_fns.blit
+ RA_CAP_COMPUTE = 1 << 3, // supports compute shaders
+ RA_CAP_DIRECT_UPLOAD = 1 << 4, // supports tex_upload without ra_buf
+ RA_CAP_BUF_RO = 1 << 5, // supports RA_VARTYPE_BUF_RO
+ RA_CAP_BUF_RW = 1 << 6, // supports RA_VARTYPE_BUF_RW
+ RA_CAP_NESTED_ARRAY = 1 << 7, // supports nested arrays
+ RA_CAP_GLOBAL_UNIFORM = 1 << 8, // supports using "naked" uniforms (not UBO)
+ RA_CAP_GATHER = 1 << 9, // supports textureGather in GLSL
+ RA_CAP_FRAGCOORD = 1 << 10, // supports reading from gl_FragCoord
+ RA_CAP_PARALLEL_COMPUTE = 1 << 11, // supports parallel compute shaders
+ RA_CAP_NUM_GROUPS = 1 << 12, // supports gl_NumWorkGroups
+ RA_CAP_SLOW_DR = 1 << 13, // direct rendering is assumed to be slow
+};
+
+enum ra_ctype {
+ RA_CTYPE_UNKNOWN = 0, // also used for inconsistent multi-component formats
+ RA_CTYPE_UNORM, // unsigned normalized integer (fixed point) formats
+ RA_CTYPE_UINT, // full integer formats
+ RA_CTYPE_FLOAT, // float formats (signed, any bit size)
+};
+
+// All formats must be usable as texture formats. All formats must be byte
+// aligned (all pixels start and end on a byte boundary), at least as far as
+// CPU transfers are concerned.
+struct ra_format {
+ // All fields are read-only after creation.
+ const char *name; // symbolic name for user interaction/debugging
+ void *priv;
+ enum ra_ctype ctype; // data type of each component
+ bool ordered; // components are sequential in memory, and returned
+ // by the shader in memory order (the shader can
+ // return arbitrary values for unused components)
+ int num_components; // component count, 0 if not applicable, max. 4
+ int component_size[4]; // in bits, all entries 0 if not applicable
+ int component_depth[4]; // bits in use for each component, 0 if not applicable
+ // (_must_ be set if component_size[] includes padding,
+                            // and the real precision as seen by shader is lower)
+ int pixel_size; // in bytes, total pixel size (0 if opaque)
+ bool luminance_alpha; // pre-GL_ARB_texture_rg hack for 2 component textures
+ // if this is set, shader must use .ra instead of .rg
+ // only applies to 2-component textures
+ bool linear_filter; // linear filtering available from shader
+ bool renderable; // can be used for render targets
+ bool storable; // can be used for storage images
+ bool dummy_format; // is not a real ra_format but a fake one (e.g. FBO).
+ // dummy formats cannot be used to create textures
+
+ // If not 0, the format represents some sort of packed fringe format, whose
+ // shader representation is given by the special_imgfmt_desc pointer.
+ int special_imgfmt;
+ const struct ra_imgfmt_desc *special_imgfmt_desc;
+
+ // This gives the GLSL image format corresponding to the format, if any.
+ // (e.g. rgba16ui)
+ const char *glsl_format;
+};
+
+struct ra_tex_params {
+ int dimensions; // 1-3 for 1D-3D textures
+ // Size of the texture. 1D textures require h=d=1, 2D textures require d=1.
+ int w, h, d;
+ const struct ra_format *format;
+    bool render_src;        // must be usable as source texture in a shader
+    bool render_dst;        // must be usable as target texture in a shader
+ bool storage_dst; // must be usable as a storage image (RA_VARTYPE_IMG_W)
+ bool blit_src; // must be usable as a blit source
+ bool blit_dst; // must be usable as a blit destination
+ bool host_mutable; // texture may be updated with tex_upload
+ bool downloadable; // texture can be read with tex_download
+ // When used as render source texture.
+ bool src_linear; // if false, use nearest sampling (whether this can
+ // be true depends on ra_format.linear_filter)
+ bool src_repeat; // if false, clamp texture coordinates to edge
+ // if true, repeat texture coordinates
+ bool non_normalized; // hack for GL_TEXTURE_RECTANGLE OSX idiocy
+ // always set to false, except in OSX code
+ bool external_oes; // hack for GL_TEXTURE_EXTERNAL_OES idiocy
+ // If non-NULL, the texture will be created with these contents. Using
+ // this does *not* require setting host_mutable. Otherwise, the initial
+ // data is undefined.
+ void *initial_data;
+};
+
+// Conflates the following typical GPU API concepts:
+// - texture itself
+// - sampler state
+// - staging buffers for texture upload
+// - framebuffer objects
+// - wrappers for swapchain framebuffers
+// - synchronization needed for upload/rendering/etc.
+struct ra_tex {
+ // All fields are read-only after creation.
+ struct ra_tex_params params;
+ void *priv;
+};
+
+struct ra_tex_upload_params {
+ struct ra_tex *tex; // Texture to upload to
+ bool invalidate; // Discard pre-existing data not in the region uploaded
+ // Uploading from buffer:
+ struct ra_buf *buf; // Buffer to upload from (mutually exclusive with `src`)
+ size_t buf_offset; // Start of data within buffer (bytes)
+ // Uploading directly: (Note: If RA_CAP_DIRECT_UPLOAD is not set, then this
+ // will be internally translated to a tex_upload buffer by the RA)
+ const void *src; // Address of data
+ // For 2D textures only:
+ struct mp_rect *rc; // Region to upload. NULL means entire image
+ ptrdiff_t stride; // The size of a horizontal line in bytes (*not* texels!)
+};
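+
+// Illustrative usage sketch (not part of the mpv sources): a full direct
+// upload of a 2D image, assuming RA_CAP_DIRECT_UPLOAD is set and with tex,
+// pixels and row_bytes standing in for caller-provided values:
+//   struct ra_tex_upload_params p = {
+//       .tex = tex, .src = pixels, .stride = row_bytes, .invalidate = true,
+//   };
+//   bool ok = ra->fns->tex_upload(ra, &p);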
+
+struct ra_tex_download_params {
+ struct ra_tex *tex; // Texture to download from
+ // Downloading directly (set by caller, data written to by callee):
+ void *dst; // Address of data (packed with no alignment)
+ ptrdiff_t stride; // The size of a horizontal line in bytes (*not* texels!)
+};
+
+// Buffer usage type. This restricts what types of operations may be performed
+// on a buffer.
+enum ra_buf_type {
+ RA_BUF_TYPE_INVALID,
+ RA_BUF_TYPE_TEX_UPLOAD, // texture upload buffer (pixel buffer object)
+ RA_BUF_TYPE_SHADER_STORAGE, // shader buffer (SSBO), for RA_VARTYPE_BUF_RW
+ RA_BUF_TYPE_UNIFORM, // uniform buffer (UBO), for RA_VARTYPE_BUF_RO
+ RA_BUF_TYPE_VERTEX, // not publicly usable (RA-internal usage)
+ RA_BUF_TYPE_SHARED_MEMORY, // device memory for sharing with external API
+};
+
+struct ra_buf_params {
+ enum ra_buf_type type;
+ size_t size;
+ bool host_mapped; // create a read-writable persistent mapping (ra_buf.data)
+ bool host_mutable; // contents may be updated via buf_update()
+ // If non-NULL, the buffer will be created with these contents. Otherwise,
+ // the initial data is undefined.
+ void *initial_data;
+};
+
+// A generic buffer, which can be used for many purposes (texture upload,
+// storage buffer, uniform buffer, etc.)
+struct ra_buf {
+ // All fields are read-only after creation.
+ struct ra_buf_params params;
+ void *data; // for persistently mapped buffers, points to the first byte
+ void *priv;
+};
+
+// Type of a shader uniform variable, or a vertex attribute. In all cases,
+// vectors and matrices are done by having more than 1 value.
+enum ra_vartype {
+ RA_VARTYPE_INVALID,
+ RA_VARTYPE_INT, // C: int, GLSL: int, ivec*
+ RA_VARTYPE_FLOAT, // C: float, GLSL: float, vec*, mat*
+ RA_VARTYPE_TEX, // C: ra_tex*, GLSL: various sampler types
+ // ra_tex.params.render_src must be true
+ RA_VARTYPE_IMG_W, // C: ra_tex*, GLSL: various image types
+ // write-only (W) image for compute shaders
+ // ra_tex.params.storage_dst must be true
+ RA_VARTYPE_BYTE_UNORM, // C: uint8_t, GLSL: int, vec* (vertex data only)
+ RA_VARTYPE_BUF_RO, // C: ra_buf*, GLSL: uniform buffer block
+ // buf type must be RA_BUF_TYPE_UNIFORM
+ RA_VARTYPE_BUF_RW, // C: ra_buf*, GLSL: shader storage buffer block
+ // buf type must be RA_BUF_TYPE_SHADER_STORAGE
+ RA_VARTYPE_COUNT
+};
+
+// Returns the host size of a ra_vartype, or 0 for abstract vartypes (e.g. tex)
+size_t ra_vartype_size(enum ra_vartype type);
+
+// Represents a uniform, texture input parameter, and similar things.
+struct ra_renderpass_input {
+ const char *name; // name as used in the shader
+ enum ra_vartype type;
+ // The total number of values is given by dim_v * dim_m.
+ int dim_v; // vector dimension (1 for non-vector and non-matrix)
+ int dim_m; // additional matrix dimension (dim_v x dim_m)
+ // Vertex data: byte offset of the attribute into the vertex struct
+ size_t offset;
+ // RA_VARTYPE_TEX: texture unit
+ // RA_VARTYPE_IMG_W: image unit
+ // RA_VARTYPE_BUF_* buffer binding point
+ // Other uniforms: unused
+ // Bindings must be unique within each namespace, as specified by
+ // desc_namespace()
+ int binding;
+};
+
+// Represents the layout requirements of an input value
+struct ra_layout {
+ size_t align; // the alignment requirements (always a power of two)
+ size_t stride; // the delta between two rows of an array/matrix
+ size_t size; // the total size of the input
+};
+
+// Returns the host layout of a render pass input. Returns {0} for renderpass
+// inputs without a corresponding host representation (e.g. textures/buffers)
+struct ra_layout ra_renderpass_input_layout(struct ra_renderpass_input *input);
+
+enum ra_blend {
+ RA_BLEND_ZERO,
+ RA_BLEND_ONE,
+ RA_BLEND_SRC_ALPHA,
+ RA_BLEND_ONE_MINUS_SRC_ALPHA,
+};
+
+enum ra_renderpass_type {
+ RA_RENDERPASS_TYPE_INVALID,
+ RA_RENDERPASS_TYPE_RASTER, // vertex+fragment shader
+ RA_RENDERPASS_TYPE_COMPUTE, // compute shader
+};
+
+// Static part of a rendering pass. It conflates the following:
+// - compiled shader and its list of uniforms
+// - vertex attributes and its shader mappings
+// - blending parameters
+// (For Vulkan, this would be shader module + pipeline state.)
+// Upon creation, the values of dynamic values such as uniform contents (whose
+// initial values are not provided here) are required to be 0.
+struct ra_renderpass_params {
+ enum ra_renderpass_type type;
+
+ // Uniforms, including texture/sampler inputs.
+ struct ra_renderpass_input *inputs;
+ int num_inputs;
+ size_t push_constants_size; // must be <= ra.max_pushc_size and a multiple of 4
+
+ // Highly implementation-specific byte array storing a compiled version
+ // of the program. Can be used to speed up shader compilation. A backend
+    // can read this in renderpass_create, or set this on the newly created
+ // ra_renderpass params field.
+ bstr cached_program;
+
+ // --- type==RA_RENDERPASS_TYPE_RASTER only
+
+ // Describes the format of the vertex data. When using ra.glsl_vulkan,
+ // the order of this array must match the vertex attribute locations.
+ struct ra_renderpass_input *vertex_attribs;
+ int num_vertex_attribs;
+ int vertex_stride;
+
+ // Format of the target texture
+ const struct ra_format *target_format;
+
+ // Shader text, in GLSL. (Yes, you need a GLSL compiler.)
+ // These are complete shaders, including prelude and declarations.
+ const char *vertex_shader;
+ const char *frag_shader;
+
+ // Target blending mode. If enable_blend is false, the blend_ fields can
+ // be ignored.
+ bool enable_blend;
+ enum ra_blend blend_src_rgb;
+ enum ra_blend blend_dst_rgb;
+ enum ra_blend blend_src_alpha;
+ enum ra_blend blend_dst_alpha;
+
+ // If true, the contents of `target` not written to will become undefined
+ bool invalidate_target;
+
+ // --- type==RA_RENDERPASS_TYPE_COMPUTE only
+
+ // Shader text, like vertex_shader/frag_shader.
+ const char *compute_shader;
+};
+
+struct ra_renderpass_params *ra_renderpass_params_copy(void *ta_parent,
+ const struct ra_renderpass_params *params);
+
+// Conflates the following typical GPU API concepts:
+// - various kinds of shaders
+// - rendering pipelines
+// - descriptor sets, uniforms, other bindings
+// - all synchronization necessary
+// - the current values of all uniforms (this one makes it relatively stateful
+// from an API perspective)
+struct ra_renderpass {
+ // All fields are read-only after creation.
+ struct ra_renderpass_params params;
+ void *priv;
+};
+
+// An input value (see ra_renderpass_input).
+struct ra_renderpass_input_val {
+ int index; // index into ra_renderpass_params.inputs[]
+ void *data; // pointer to data according to ra_renderpass_input
+ // (e.g. type==RA_VARTYPE_FLOAT+dim_v=3,dim_m=3 => float[9])
+};
+
+// Parameters for performing a rendering pass (basically the dynamic params).
+// These change potentially every time.
+struct ra_renderpass_run_params {
+ struct ra_renderpass *pass;
+
+ // Generally this lists parameters only which changed since the last
+ // invocation and need to be updated. The ra_renderpass instance is
+ // supposed to keep unchanged values from the previous run.
+ // For non-primitive types like textures, these entries are always added,
+ // even if they do not change.
+ struct ra_renderpass_input_val *values;
+ int num_values;
+ void *push_constants; // must be set if params.push_constants_size > 0
+
+ // --- pass->params.type==RA_RENDERPASS_TYPE_RASTER only
+
+ // target->params.render_dst must be true, and target->params.format must
+ // match pass->params.target_format.
+ struct ra_tex *target;
+ struct mp_rect viewport;
+ struct mp_rect scissors;
+
+ // (The primitive type is always a triangle list.)
+ void *vertex_data;
+ int vertex_count; // number of vertex elements, not bytes
+
+ // --- pass->params.type==RA_RENDERPASS_TYPE_COMPUTE only
+
+ // Number of work groups to be run in X/Y/Z dimensions.
+ int compute_groups[3];
+};
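+
+// Illustrative usage sketch (not part of the mpv sources): dispatching a
+// raster pass, with pass, target, verts and num_verts standing in for
+// caller-provided values:
+//   struct ra_renderpass_run_params run = {
+//       .pass = pass, .target = target,
+//       .viewport = (struct mp_rect){0, 0, target->params.w, target->params.h},
+//       .scissors = (struct mp_rect){0, 0, target->params.w, target->params.h},
+//       .vertex_data = verts, .vertex_count = num_verts,
+//   };
+//   ra->fns->renderpass_run(ra, &run);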
+
+// This is an opaque type provided by the implementation, but we want to at
+// least give it a saner name than void* for code readability purposes.
+typedef void ra_timer;
+
+// Rendering API entrypoints. (Note: there are some additional hidden features
+// you need to take care of. For example, hwdec mapping will be provided
+// separately from ra, but might need to call into ra private code.)
+struct ra_fns {
+ void (*destroy)(struct ra *ra);
+
+ // Create a texture (with undefined contents). Return NULL on failure.
+ // This is a rare operation, and normally textures and even FBOs for
+ // temporary rendering intermediate data are cached.
+ struct ra_tex *(*tex_create)(struct ra *ra,
+ const struct ra_tex_params *params);
+
+ void (*tex_destroy)(struct ra *ra, struct ra_tex *tex);
+
+ // Upload data to a texture. This is an extremely common operation. When
+ // using a buffer, the contents of the buffer must exactly match the image
+ // - conversions between bit depth etc. are not supported. The buffer *may*
+ // be marked as "in use" while this operation is going on, and the contents
+ // must not be touched again by the API user until buf_poll returns true.
+ // Returns whether successful.
+ bool (*tex_upload)(struct ra *ra, const struct ra_tex_upload_params *params);
+
+ // Copy data from the texture to memory. ra_tex_params.downloadable must
+ // have been set to true on texture creation.
+ bool (*tex_download)(struct ra *ra, struct ra_tex_download_params *params);
+
+ // Create a buffer. This can be used as a persistently mapped buffer,
+ // a uniform buffer, a shader storage buffer or possibly others.
+ // Not all usage types must be supported; may return NULL if unavailable.
+ struct ra_buf *(*buf_create)(struct ra *ra,
+ const struct ra_buf_params *params);
+
+ void (*buf_destroy)(struct ra *ra, struct ra_buf *buf);
+
+ // Update the contents of a buffer, starting at a given offset (*must* be a
+ // multiple of 4) and up to a given size, with the contents of *data. This
+ // is an extremely common operation. Calling this while the buffer is
+ // considered "in use" is an error. (See: buf_poll)
+ void (*buf_update)(struct ra *ra, struct ra_buf *buf, ptrdiff_t offset,
+ const void *data, size_t size);
+
+ // Returns if a buffer is currently "in use" or not. Updating the contents
+ // of a buffer (via buf_update or writing to buf->data) while it is still
+ // in use is an error and may result in graphical corruption. Optional, if
+ // NULL then all buffers are always usable.
+ bool (*buf_poll)(struct ra *ra, struct ra_buf *buf);
+
+ // Returns the layout requirements of a uniform buffer element. Optional,
+ // but must be implemented if RA_CAP_BUF_RO is supported.
+ struct ra_layout (*uniform_layout)(struct ra_renderpass_input *inp);
+
+ // Returns the layout requirements of a push constant element. Optional,
+ // but must be implemented if ra.max_pushc_size > 0.
+ struct ra_layout (*push_constant_layout)(struct ra_renderpass_input *inp);
+
+ // Returns an abstract namespace index for a given renderpass input type.
+ // This will always be a value >= 0 and < RA_VARTYPE_COUNT. This is used to
+ // figure out which inputs may share the same value of `binding`.
+ int (*desc_namespace)(struct ra *ra, enum ra_vartype type);
+
+ // Clear the dst with the given color (rgba) and within the given scissor.
+ // dst must have dst->params.render_dst==true. Content outside of the
+ // scissor is preserved.
+ void (*clear)(struct ra *ra, struct ra_tex *dst, float color[4],
+ struct mp_rect *scissor);
+
+ // Copy a sub-rectangle from one texture to another. The source/dest region
+ // is always within the texture bounds. Areas outside the dest region are
+ // preserved. The formats of the textures must be loosely compatible. The
+ // dst texture can be a swapchain framebuffer, but src can not. Only 2D
+ // textures are supported.
+ // The textures must have blit_src and blit_dst set, respectively.
+ // Rectangles with negative width/height lead to flipping, different src/dst
+ // sizes lead to point scaling. Coordinates are always in pixels.
+ // Optional. Only available if RA_CAP_BLIT is set (if it's not set, it must
+ // not be called, even if it's non-NULL).
+ void (*blit)(struct ra *ra, struct ra_tex *dst, struct ra_tex *src,
+ struct mp_rect *dst_rc, struct mp_rect *src_rc);
+
+ // Compile a shader and create a pipeline. This is a rare operation.
+ // The params pointer and anything it points to must stay valid until
+ // renderpass_destroy.
+ struct ra_renderpass *(*renderpass_create)(struct ra *ra,
+ const struct ra_renderpass_params *params);
+
+ void (*renderpass_destroy)(struct ra *ra, struct ra_renderpass *pass);
+
+ // Perform a render pass, basically drawing a list of triangles to a FBO.
+ // This is an extremely common operation.
+ void (*renderpass_run)(struct ra *ra,
+ const struct ra_renderpass_run_params *params);
+
+ // Create a timer object. Returns NULL on failure, or if timers are
+ // unavailable for some reason. Optional.
+ ra_timer *(*timer_create)(struct ra *ra);
+
+ void (*timer_destroy)(struct ra *ra, ra_timer *timer);
+
+ // Start recording a timer. Note that valid usage requires you to pair
+ // every start with a stop. Trying to start a timer twice, or trying to
+    // stop a timer before having started it, constitutes invalid usage.
+ void (*timer_start)(struct ra *ra, ra_timer *timer);
+
+ // Stop recording a timer. This also returns any results that have been
+ // measured since the last usage of this ra_timer. It's important to note
+    // that GPU timer measurements are asynchronous, so this function does not
+ // always produce a value - and the values it does produce are typically
+ // delayed by a few frames. When no value is available, this returns 0.
+ uint64_t (*timer_stop)(struct ra *ra, ra_timer *timer);
+
+ // Associates a marker with any past error messages, for debugging
+ // purposes. Optional.
+ void (*debug_marker)(struct ra *ra, const char *msg);
+};
+
+struct ra_tex *ra_tex_create(struct ra *ra, const struct ra_tex_params *params);
+void ra_tex_free(struct ra *ra, struct ra_tex **tex);
+
+struct ra_buf *ra_buf_create(struct ra *ra, const struct ra_buf_params *params);
+void ra_buf_free(struct ra *ra, struct ra_buf **buf);
+
+void ra_free(struct ra **ra);
+
+const struct ra_format *ra_find_unorm_format(struct ra *ra,
+ int bytes_per_component,
+ int n_components);
+const struct ra_format *ra_find_uint_format(struct ra *ra,
+ int bytes_per_component,
+ int n_components);
+const struct ra_format *ra_find_float16_format(struct ra *ra, int n_components);
+const struct ra_format *ra_find_named_format(struct ra *ra, const char *name);
+
+struct ra_imgfmt_desc {
+ int num_planes;
+ const struct ra_format *planes[4];
+ // Chroma pixel size (1x1 is 4:4:4)
+ uint8_t chroma_w, chroma_h;
+ // Component storage size in bits (possibly padded). For formats with
+ // different sizes per component, this is arbitrary. For padded formats
+ // like P010 or YUV420P10, padding is included.
+ int component_bits;
+ // Like mp_regular_imgfmt.component_pad.
+ int component_pad;
+ // == planes[n].ctype (RA_CTYPE_UNKNOWN if not applicable)
+ enum ra_ctype component_type;
+ // For each texture and each texture output (rgba order) describe what
+ // component it returns.
+ // The values are like the values in mp_regular_imgfmt_plane.components[].
+ // Access as components[plane_nr][component_index]. Set unused items to 0.
+ // For ra_format.luminance_alpha, this returns 1/2 ("rg") instead of 1/4
+ // ("ra"). the logic is that the texture format has 2 channels, thus the
+ // data must be returned in the first two components. The renderer fixes
+ // this later.
+ uint8_t components[4][4];
+};
+
+const char *ra_fmt_glsl_format(const struct ra_format *fmt);
+
+bool ra_get_imgfmt_desc(struct ra *ra, int imgfmt, struct ra_imgfmt_desc *out);
+
+void ra_dump_tex_formats(struct ra *ra, int msgl);
+void ra_dump_imgfmt_desc(struct ra *ra, const struct ra_imgfmt_desc *desc,
+ int msgl);
+void ra_dump_img_formats(struct ra *ra, int msgl);
diff --git a/video/out/gpu/shader_cache.c b/video/out/gpu/shader_cache.c
new file mode 100644
index 0000000..3e05173
--- /dev/null
+++ b/video/out/gpu/shader_cache.c
@@ -0,0 +1,1056 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include <libavutil/sha.h>
+#include <libavutil/mem.h>
+
+#include "osdep/io.h"
+
+#include "common/common.h"
+#include "options/path.h"
+#include "stream/stream.h"
+#include "shader_cache.h"
+#include "utils.h"
+
+// Force cache flush if more than this number of shaders is created.
+#define SC_MAX_ENTRIES 256
+
+union uniform_val {
+ float f[9]; // RA_VARTYPE_FLOAT
+ int i[4]; // RA_VARTYPE_INT
+ struct ra_tex *tex; // RA_VARTYPE_TEX, RA_VARTYPE_IMG_*
+ struct ra_buf *buf; // RA_VARTYPE_BUF_*
+};
+
+enum sc_uniform_type {
+ SC_UNIFORM_TYPE_GLOBAL = 0, // global uniform (RA_CAP_GLOBAL_UNIFORM)
+ SC_UNIFORM_TYPE_UBO = 1, // uniform buffer (RA_CAP_BUF_RO)
+ SC_UNIFORM_TYPE_PUSHC = 2, // push constant (ra.max_pushc_size)
+};
+
+struct sc_uniform {
+ enum sc_uniform_type type;
+ struct ra_renderpass_input input;
+ const char *glsl_type;
+ union uniform_val v;
+ char *buffer_format;
+ // for SC_UNIFORM_TYPE_UBO/PUSHC:
+ struct ra_layout layout;
+ size_t offset; // byte offset within the buffer
+};
+
+struct sc_cached_uniform {
+ union uniform_val v;
+ int index; // for ra_renderpass_input_val
+ bool set; // whether the uniform has ever been set
+};
+
+struct sc_entry {
+ struct ra_renderpass *pass;
+ struct sc_cached_uniform *cached_uniforms;
+ int num_cached_uniforms;
+ bstr total;
+ struct timer_pool *timer;
+ struct ra_buf *ubo;
+ int ubo_index; // for ra_renderpass_input_val.index
+ void *pushc;
+};
+
+struct gl_shader_cache {
+ struct ra *ra;
+ struct mp_log *log;
+
+ // permanent
+ char **exts;
+ int num_exts;
+
+ // this is modified during use (gl_sc_add() etc.) and reset for each shader
+ bstr prelude_text;
+ bstr header_text;
+ bstr text;
+
+ // Next binding point (texture unit, image unit, buffer binding, etc.)
+ // In OpenGL these are separate for each input type
+ int next_binding[RA_VARTYPE_COUNT];
+ bool next_uniform_dynamic;
+
+ struct ra_renderpass_params params;
+
+ struct sc_entry **entries;
+ int num_entries;
+
+ struct sc_entry *current_shader; // set by gl_sc_generate()
+
+ struct sc_uniform *uniforms;
+ int num_uniforms;
+
+ int ubo_binding;
+ size_t ubo_size;
+ size_t pushc_size;
+
+ struct ra_renderpass_input_val *values;
+ int num_values;
+
+ // For checking that the user is calling gl_sc_reset() properly.
+ bool needs_reset;
+
+ bool error_state; // true if an error occurred
+
+ // temporary buffers (avoids frequent reallocations)
+ bstr tmp[6];
+
+ // For the disk-cache.
+ char *cache_dir;
+ struct mpv_global *global; // can be NULL
+};
+
+struct gl_shader_cache *gl_sc_create(struct ra *ra, struct mpv_global *global,
+ struct mp_log *log)
+{
+ struct gl_shader_cache *sc = talloc_ptrtype(NULL, sc);
+ *sc = (struct gl_shader_cache){
+ .ra = ra,
+ .global = global,
+ .log = log,
+ };
+ gl_sc_reset(sc);
+ return sc;
+}
+
+// Reset the previous pass. This must be called after gl_sc_generate and before
+// starting a new shader. It may also be called on errors.
+void gl_sc_reset(struct gl_shader_cache *sc)
+{
+ sc->prelude_text.len = 0;
+ sc->header_text.len = 0;
+ sc->text.len = 0;
+ for (int n = 0; n < sc->num_uniforms; n++)
+ talloc_free((void *)sc->uniforms[n].input.name);
+ sc->num_uniforms = 0;
+ sc->ubo_binding = 0;
+ sc->ubo_size = 0;
+ sc->pushc_size = 0;
+ for (int i = 0; i < RA_VARTYPE_COUNT; i++)
+ sc->next_binding[i] = 0;
+ sc->next_uniform_dynamic = false;
+ sc->current_shader = NULL;
+ sc->params = (struct ra_renderpass_params){0};
+ sc->needs_reset = false;
+}
+
+static void sc_flush_cache(struct gl_shader_cache *sc)
+{
+ MP_DBG(sc, "flushing shader cache\n");
+
+ for (int n = 0; n < sc->num_entries; n++) {
+ struct sc_entry *e = sc->entries[n];
+ ra_buf_free(sc->ra, &e->ubo);
+ if (e->pass)
+ sc->ra->fns->renderpass_destroy(sc->ra, e->pass);
+ timer_pool_destroy(e->timer);
+ talloc_free(e);
+ }
+ sc->num_entries = 0;
+}
+
+void gl_sc_destroy(struct gl_shader_cache *sc)
+{
+ if (!sc)
+ return;
+ gl_sc_reset(sc);
+ sc_flush_cache(sc);
+ talloc_free(sc);
+}
+
+bool gl_sc_error_state(struct gl_shader_cache *sc)
+{
+ return sc->error_state;
+}
+
+void gl_sc_reset_error(struct gl_shader_cache *sc)
+{
+ sc->error_state = false;
+}
+
+void gl_sc_enable_extension(struct gl_shader_cache *sc, char *name)
+{
+ for (int n = 0; n < sc->num_exts; n++) {
+ if (strcmp(sc->exts[n], name) == 0)
+ return;
+ }
+ MP_TARRAY_APPEND(sc, sc->exts, sc->num_exts, talloc_strdup(sc, name));
+}
+
+#define bstr_xappend0(sc, b, s) bstr_xappend(sc, b, bstr0(s))
+
+void gl_sc_add(struct gl_shader_cache *sc, const char *text)
+{
+ bstr_xappend0(sc, &sc->text, text);
+}
+
+void gl_sc_addf(struct gl_shader_cache *sc, const char *textf, ...)
+{
+ va_list ap;
+ va_start(ap, textf);
+ bstr_xappend_vasprintf(sc, &sc->text, textf, ap);
+ va_end(ap);
+}
+
+void gl_sc_hadd(struct gl_shader_cache *sc, const char *text)
+{
+ bstr_xappend0(sc, &sc->header_text, text);
+}
+
+void gl_sc_haddf(struct gl_shader_cache *sc, const char *textf, ...)
+{
+ va_list ap;
+ va_start(ap, textf);
+ bstr_xappend_vasprintf(sc, &sc->header_text, textf, ap);
+ va_end(ap);
+}
+
+void gl_sc_hadd_bstr(struct gl_shader_cache *sc, struct bstr text)
+{
+ bstr_xappend(sc, &sc->header_text, text);
+}
+
+void gl_sc_paddf(struct gl_shader_cache *sc, const char *textf, ...)
+{
+ va_list ap;
+ va_start(ap, textf);
+ bstr_xappend_vasprintf(sc, &sc->prelude_text, textf, ap);
+ va_end(ap);
+}
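+
+// Illustrative usage sketch (not part of the mpv sources): a caller typically
+// declares uniforms and appends fragment body text before the pass is
+// generated and dispatched, and calls gl_sc_reset() once it has been run:
+//   gl_sc_uniform_f(sc, "gamma", 2.2f);
+//   gl_sc_addf(sc, "color.rgb = pow(color.rgb, vec3(1.0 / gamma));\n");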
+
+static struct sc_uniform *find_uniform(struct gl_shader_cache *sc,
+ const char *name)
+{
+ struct sc_uniform new = {
+ .input = {
+ .dim_v = 1,
+ .dim_m = 1,
+ },
+ };
+
+ for (int n = 0; n < sc->num_uniforms; n++) {
+ struct sc_uniform *u = &sc->uniforms[n];
+ if (strcmp(u->input.name, name) == 0) {
+ const char *allocname = u->input.name;
+ *u = new;
+ u->input.name = allocname;
+ return u;
+ }
+ }
+
+ // not found -> add it
+ new.input.name = talloc_strdup(NULL, name);
+ MP_TARRAY_APPEND(sc, sc->uniforms, sc->num_uniforms, new);
+ return &sc->uniforms[sc->num_uniforms - 1];
+}
+
+static int gl_sc_next_binding(struct gl_shader_cache *sc, enum ra_vartype type)
+{
+ return sc->next_binding[sc->ra->fns->desc_namespace(sc->ra, type)]++;
+}
+
+void gl_sc_uniform_dynamic(struct gl_shader_cache *sc)
+{
+ sc->next_uniform_dynamic = true;
+}
+
+// Updates the metadata for the given sc_uniform. Assumes sc_uniform->input
+// and glsl_type/buffer_format are already set.
+static void update_uniform_params(struct gl_shader_cache *sc, struct sc_uniform *u)
+{
+ bool dynamic = sc->next_uniform_dynamic;
+ sc->next_uniform_dynamic = false;
+
+ // Try not using push constants for "large" values like matrices, since
+ // this is likely to both exceed the VGPR budget as well as the pushc size
+ // budget
+ bool try_pushc = u->input.dim_m == 1 || dynamic;
+
+ // Attempt using push constants first
+ if (try_pushc && sc->ra->glsl_vulkan && sc->ra->max_pushc_size) {
+ struct ra_layout layout = sc->ra->fns->push_constant_layout(&u->input);
+ size_t offset = MP_ALIGN_UP(sc->pushc_size, layout.align);
+ // Push constants have limited size, so make sure we don't exceed this
+ size_t new_size = offset + layout.size;
+ if (new_size <= sc->ra->max_pushc_size) {
+ u->type = SC_UNIFORM_TYPE_PUSHC;
+ u->layout = layout;
+ u->offset = offset;
+ sc->pushc_size = new_size;
+ return;
+ }
+ }
+
+ // Attempt using uniform buffer next. The GLSL version 440 check is due
+    // to explicit offsets on UBO entries. In theory we could leave out
+ // the offsets and support UBOs for older GL as well, but this is a nice
+ // safety net for driver bugs (and also rules out potentially buggy drivers)
+ // Also avoid UBOs for highly dynamic stuff since that requires synchronizing
+ // the UBO writes every frame
+ bool try_ubo = !(sc->ra->caps & RA_CAP_GLOBAL_UNIFORM) || !dynamic;
+ if (try_ubo && sc->ra->glsl_version >= 440 && (sc->ra->caps & RA_CAP_BUF_RO)) {
+ u->type = SC_UNIFORM_TYPE_UBO;
+ u->layout = sc->ra->fns->uniform_layout(&u->input);
+ u->offset = MP_ALIGN_UP(sc->ubo_size, u->layout.align);
+ sc->ubo_size = u->offset + u->layout.size;
+ return;
+ }
+
+ // If all else fails, use global uniforms
+ assert(sc->ra->caps & RA_CAP_GLOBAL_UNIFORM);
+ u->type = SC_UNIFORM_TYPE_GLOBAL;
+}
+
+void gl_sc_uniform_texture(struct gl_shader_cache *sc, char *name,
+ struct ra_tex *tex)
+{
+ const char *glsl_type = "sampler2D";
+ if (tex->params.dimensions == 1) {
+ glsl_type = "sampler1D";
+ } else if (tex->params.dimensions == 3) {
+ glsl_type = "sampler3D";
+ } else if (tex->params.non_normalized) {
+ glsl_type = "sampler2DRect";
+ } else if (tex->params.external_oes) {
+ glsl_type = "samplerExternalOES";
+ } else if (tex->params.format->ctype == RA_CTYPE_UINT) {
+ glsl_type = sc->ra->glsl_es ? "highp usampler2D" : "usampler2D";
+ }
+
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_TEX;
+ u->glsl_type = glsl_type;
+ u->input.binding = gl_sc_next_binding(sc, u->input.type);
+ u->v.tex = tex;
+}
+
+void gl_sc_uniform_image2D_wo(struct gl_shader_cache *sc, const char *name,
+ struct ra_tex *tex)
+{
+ gl_sc_enable_extension(sc, "GL_ARB_shader_image_load_store");
+
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_IMG_W;
+ u->glsl_type = sc->ra->glsl_es ? "writeonly highp image2D" : "writeonly image2D";
+ u->input.binding = gl_sc_next_binding(sc, u->input.type);
+ u->v.tex = tex;
+}
+
+void gl_sc_ssbo(struct gl_shader_cache *sc, char *name, struct ra_buf *buf,
+ char *format, ...)
+{
+ assert(sc->ra->caps & RA_CAP_BUF_RW);
+ gl_sc_enable_extension(sc, "GL_ARB_shader_storage_buffer_object");
+
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_BUF_RW;
+ u->glsl_type = "";
+ u->input.binding = gl_sc_next_binding(sc, u->input.type);
+ u->v.buf = buf;
+
+ va_list ap;
+ va_start(ap, format);
+ u->buffer_format = ta_vasprintf(sc, format, ap);
+ va_end(ap);
+}
+
+void gl_sc_uniform_f(struct gl_shader_cache *sc, char *name, float f)
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_FLOAT;
+ u->glsl_type = "float";
+ update_uniform_params(sc, u);
+ u->v.f[0] = f;
+}
+
+void gl_sc_uniform_i(struct gl_shader_cache *sc, char *name, int i)
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_INT;
+ u->glsl_type = "int";
+ update_uniform_params(sc, u);
+ u->v.i[0] = i;
+}
+
+void gl_sc_uniform_vec2(struct gl_shader_cache *sc, char *name, float f[2])
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_FLOAT;
+ u->input.dim_v = 2;
+ u->glsl_type = "vec2";
+ update_uniform_params(sc, u);
+ u->v.f[0] = f[0];
+ u->v.f[1] = f[1];
+}
+
+void gl_sc_uniform_vec3(struct gl_shader_cache *sc, char *name, float f[3])
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_FLOAT;
+ u->input.dim_v = 3;
+ u->glsl_type = "vec3";
+ update_uniform_params(sc, u);
+ u->v.f[0] = f[0];
+ u->v.f[1] = f[1];
+ u->v.f[2] = f[2];
+}
+
+static void transpose2x2(float r[2 * 2])
+{
+ MPSWAP(float, r[0+2*1], r[1+2*0]);
+}
+
+void gl_sc_uniform_mat2(struct gl_shader_cache *sc, char *name,
+ bool transpose, float *v)
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_FLOAT;
+ u->input.dim_v = 2;
+ u->input.dim_m = 2;
+ u->glsl_type = "mat2";
+ update_uniform_params(sc, u);
+ for (int n = 0; n < 4; n++)
+ u->v.f[n] = v[n];
+ if (transpose)
+ transpose2x2(&u->v.f[0]);
+}
+
+static void transpose3x3(float r[3 * 3])
+{
+ MPSWAP(float, r[0+3*1], r[1+3*0]);
+ MPSWAP(float, r[0+3*2], r[2+3*0]);
+ MPSWAP(float, r[1+3*2], r[2+3*1]);
+}
+
+void gl_sc_uniform_mat3(struct gl_shader_cache *sc, char *name,
+ bool transpose, float *v)
+{
+ struct sc_uniform *u = find_uniform(sc, name);
+ u->input.type = RA_VARTYPE_FLOAT;
+ u->input.dim_v = 3;
+ u->input.dim_m = 3;
+ u->glsl_type = "mat3";
+ update_uniform_params(sc, u);
+ for (int n = 0; n < 9; n++)
+ u->v.f[n] = v[n];
+ if (transpose)
+ transpose3x3(&u->v.f[0]);
+}
+
+void gl_sc_blend(struct gl_shader_cache *sc,
+ enum ra_blend blend_src_rgb,
+ enum ra_blend blend_dst_rgb,
+ enum ra_blend blend_src_alpha,
+ enum ra_blend blend_dst_alpha)
+{
+ sc->params.enable_blend = true;
+ sc->params.blend_src_rgb = blend_src_rgb;
+ sc->params.blend_dst_rgb = blend_dst_rgb;
+ sc->params.blend_src_alpha = blend_src_alpha;
+ sc->params.blend_dst_alpha = blend_dst_alpha;
+}
+
+const char *gl_sc_bvec(struct gl_shader_cache *sc, int dims)
+{
+ static const char *bvecs[] = {
+ [1] = "bool",
+ [2] = "bvec2",
+ [3] = "bvec3",
+ [4] = "bvec4",
+ };
+
+ static const char *vecs[] = {
+ [1] = "float",
+ [2] = "vec2",
+ [3] = "vec3",
+ [4] = "vec4",
+ };
+
+ assert(dims > 0 && dims < MP_ARRAY_SIZE(bvecs));
+ return sc->ra->glsl_version >= 130 ? bvecs[dims] : vecs[dims];
+}
+
+static const char *vao_glsl_type(const struct ra_renderpass_input *e)
+{
+ // pretty dumb... too dumb, but works for us
+ switch (e->dim_v) {
+ case 1: return "float";
+ case 2: return "vec2";
+ case 3: return "vec3";
+ case 4: return "vec4";
+ default: MP_ASSERT_UNREACHABLE();
+ }
+}
+
+static void update_ubo(struct ra *ra, struct ra_buf *ubo, struct sc_uniform *u)
+{
+ uintptr_t src = (uintptr_t) &u->v;
+ size_t dst = u->offset;
+ struct ra_layout src_layout = ra_renderpass_input_layout(&u->input);
+ struct ra_layout dst_layout = u->layout;
+
+ for (int i = 0; i < u->input.dim_m; i++) {
+ ra->fns->buf_update(ra, ubo, dst, (void *)src, src_layout.stride);
+ src += src_layout.stride;
+ dst += dst_layout.stride;
+ }
+}
+
+static void update_pushc(struct ra *ra, void *pushc, struct sc_uniform *u)
+{
+ uintptr_t src = (uintptr_t) &u->v;
+ uintptr_t dst = (uintptr_t) pushc + (ptrdiff_t) u->offset;
+ struct ra_layout src_layout = ra_renderpass_input_layout(&u->input);
+ struct ra_layout dst_layout = u->layout;
+
+ for (int i = 0; i < u->input.dim_m; i++) {
+ memcpy((void *)dst, (void *)src, src_layout.stride);
+ src += src_layout.stride;
+ dst += dst_layout.stride;
+ }
+}
+
+static void update_uniform(struct gl_shader_cache *sc, struct sc_entry *e,
+ struct sc_uniform *u, int n)
+{
+ struct sc_cached_uniform *un = &e->cached_uniforms[n];
+ struct ra_layout layout = ra_renderpass_input_layout(&u->input);
+ if (layout.size > 0 && un->set && memcmp(&un->v, &u->v, layout.size) == 0)
+ return;
+
+ un->v = u->v;
+ un->set = true;
+
+ static const char *desc[] = {
+ [SC_UNIFORM_TYPE_UBO] = "UBO",
+ [SC_UNIFORM_TYPE_PUSHC] = "PC",
+ [SC_UNIFORM_TYPE_GLOBAL] = "global",
+ };
+ MP_TRACE(sc, "Updating %s uniform '%s'\n", desc[u->type], u->input.name);
+
+ switch (u->type) {
+ case SC_UNIFORM_TYPE_GLOBAL: {
+ struct ra_renderpass_input_val value = {
+ .index = un->index,
+ .data = &un->v,
+ };
+ MP_TARRAY_APPEND(sc, sc->values, sc->num_values, value);
+ break;
+ }
+ case SC_UNIFORM_TYPE_UBO:
+ assert(e->ubo);
+ update_ubo(sc->ra, e->ubo, u);
+ break;
+ case SC_UNIFORM_TYPE_PUSHC:
+ assert(e->pushc);
+ update_pushc(sc->ra, e->pushc, u);
+ break;
+ default: MP_ASSERT_UNREACHABLE();
+ }
+}
+
+void gl_sc_set_cache_dir(struct gl_shader_cache *sc, char *dir)
+{
+ talloc_free(sc->cache_dir);
+ if (dir && dir[0]) {
+ dir = mp_get_user_path(NULL, sc->global, dir);
+ } else {
+ dir = mp_find_user_file(NULL, sc->global, "cache", "");
+ }
+ sc->cache_dir = talloc_strdup(sc, dir);
+ talloc_free(dir);
+}
+
+static bool create_pass(struct gl_shader_cache *sc, struct sc_entry *entry)
+{
+ bool ret = false;
+
+ void *tmp = talloc_new(NULL);
+ struct ra_renderpass_params params = sc->params;
+
+ const char *cache_header = "mpv shader cache v1\n";
+ char *cache_filename = NULL;
+ char *cache_dir = NULL;
+
+ if (sc->cache_dir && sc->cache_dir[0]) {
+ // Try to load it from a disk cache.
+ cache_dir = mp_get_user_path(tmp, sc->global, sc->cache_dir);
+
+ struct AVSHA *sha = av_sha_alloc();
+ MP_HANDLE_OOM(sha);
+ av_sha_init(sha, 256);
+ av_sha_update(sha, entry->total.start, entry->total.len);
+
+ uint8_t hash[256 / 8];
+ av_sha_final(sha, hash);
+ av_free(sha);
+
+ char hashstr[256 / 8 * 2 + 1];
+ for (int n = 0; n < 256 / 8; n++)
+ snprintf(hashstr + n * 2, sizeof(hashstr) - n * 2, "%02X", hash[n]);
+
+ cache_filename = mp_path_join(tmp, cache_dir, hashstr);
+ if (stat(cache_filename, &(struct stat){0}) == 0) {
+ MP_DBG(sc, "Trying to load shader from disk...\n");
+ struct bstr cachedata =
+ stream_read_file(cache_filename, tmp, sc->global, 1000000000);
+ if (bstr_eatstart0(&cachedata, cache_header))
+ params.cached_program = cachedata;
+ }
+ }
+
+ // If using a UBO, also make sure to add it as an input value so the RA
+ // can see it
+ if (sc->ubo_size) {
+ entry->ubo_index = sc->params.num_inputs;
+ struct ra_renderpass_input ubo_input = {
+ .name = "UBO",
+ .type = RA_VARTYPE_BUF_RO,
+ .dim_v = 1,
+ .dim_m = 1,
+ .binding = sc->ubo_binding,
+ };
+ MP_TARRAY_APPEND(sc, params.inputs, params.num_inputs, ubo_input);
+ }
+
+ if (sc->pushc_size) {
+ params.push_constants_size = MP_ALIGN_UP(sc->pushc_size, 4);
+ entry->pushc = talloc_zero_size(entry, params.push_constants_size);
+ }
+
+ if (sc->ubo_size) {
+ struct ra_buf_params ubo_params = {
+ .type = RA_BUF_TYPE_UNIFORM,
+ .size = sc->ubo_size,
+ .host_mutable = true,
+ };
+
+ entry->ubo = ra_buf_create(sc->ra, &ubo_params);
+ if (!entry->ubo) {
+ MP_ERR(sc, "Failed creating uniform buffer!\n");
+ goto error;
+ }
+ }
+
+ entry->pass = sc->ra->fns->renderpass_create(sc->ra, &params);
+ if (!entry->pass)
+ goto error;
+
+ if (entry->pass && cache_filename) {
+ bstr nc = entry->pass->params.cached_program;
+ if (nc.len && !bstr_equals(params.cached_program, nc)) {
+ mp_mkdirp(cache_dir);
+
+ MP_DBG(sc, "Writing shader cache file: %s\n", cache_filename);
+ FILE *out = fopen(cache_filename, "wb");
+ if (out) {
+ fwrite(cache_header, strlen(cache_header), 1, out);
+ fwrite(nc.start, nc.len, 1, out);
+ fclose(out);
+ }
+ }
+ }
+
+ ret = true;
+
+error:
+ talloc_free(tmp);
+ return ret;
+}
+
+#define ADD(x, ...) bstr_xappend_asprintf(sc, (x), __VA_ARGS__)
+#define ADD_BSTR(x, s) bstr_xappend(sc, (x), (s))
+
+static void add_uniforms(struct gl_shader_cache *sc, bstr *dst)
+{
+ // Add all of the UBO entries separately as members of their own buffer
+ if (sc->ubo_size > 0) {
+ ADD(dst, "layout(std140, binding=%d) uniform UBO {\n", sc->ubo_binding);
+ for (int n = 0; n < sc->num_uniforms; n++) {
+ struct sc_uniform *u = &sc->uniforms[n];
+ if (u->type != SC_UNIFORM_TYPE_UBO)
+ continue;
+ ADD(dst, "layout(offset=%zu) %s %s;\n", u->offset, u->glsl_type,
+ u->input.name);
+ }
+ ADD(dst, "};\n");
+ }
+
+ // Ditto for push constants
+ if (sc->pushc_size > 0) {
+ ADD(dst, "layout(std430, push_constant) uniform PushC {\n");
+ for (int n = 0; n < sc->num_uniforms; n++) {
+ struct sc_uniform *u = &sc->uniforms[n];
+ if (u->type != SC_UNIFORM_TYPE_PUSHC)
+ continue;
+ ADD(dst, "layout(offset=%zu) %s %s;\n", u->offset, u->glsl_type,
+ u->input.name);
+ }
+ ADD(dst, "};\n");
+ }
+
+ for (int n = 0; n < sc->num_uniforms; n++) {
+ struct sc_uniform *u = &sc->uniforms[n];
+ if (u->type != SC_UNIFORM_TYPE_GLOBAL)
+ continue;
+ switch (u->input.type) {
+ case RA_VARTYPE_INT:
+ case RA_VARTYPE_FLOAT:
+ assert(sc->ra->caps & RA_CAP_GLOBAL_UNIFORM);
+ MP_FALLTHROUGH;
+ case RA_VARTYPE_TEX:
+ // Vulkan requires explicitly assigning the bindings in the shader
+ // source. For OpenGL it's optional, but requires higher GL version
+ // so we don't do it (and instead have ra_gl update the bindings
+ // after program creation).
+ if (sc->ra->glsl_vulkan)
+ ADD(dst, "layout(binding=%d) ", u->input.binding);
+ ADD(dst, "uniform %s %s;\n", u->glsl_type, u->input.name);
+ break;
+ case RA_VARTYPE_BUF_RO:
+ ADD(dst, "layout(std140, binding=%d) uniform %s { %s };\n",
+ u->input.binding, u->input.name, u->buffer_format);
+ break;
+ case RA_VARTYPE_BUF_RW:
+ ADD(dst, "layout(std430, binding=%d) restrict coherent buffer %s { %s };\n",
+ u->input.binding, u->input.name, u->buffer_format);
+ break;
+ case RA_VARTYPE_IMG_W: {
+ // For better compatibility, we have to explicitly label the
+ // type of data we will be reading/writing to this image.
+ const char *fmt = u->v.tex->params.format->glsl_format;
+
+ if (sc->ra->glsl_vulkan) {
+ if (fmt) {
+ ADD(dst, "layout(binding=%d, %s) ", u->input.binding, fmt);
+ } else {
+ ADD(dst, "layout(binding=%d) ", u->input.binding);
+ }
+ } else if (fmt) {
+ ADD(dst, "layout(%s) ", fmt);
+ }
+ ADD(dst, "uniform restrict %s %s;\n", u->glsl_type, u->input.name);
+ }
+ }
+ }
+}
+
+// 1. Generate vertex and fragment shaders from the fragment shader text added
+// with gl_sc_add(). The generated shader program is cached (based on the
+// text), so actual compilation happens only the first time.
+// 2. Update the uniforms and textures set with gl_sc_uniform_*.
+// 3. Make the new shader program current (glUseProgram()).
+// After that, you render, and then you call gl_sc_reset(), which does:
+// 1. Unbind the program and all textures.
+// 2. Reset the sc state and prepare for a new shader program. (All uniforms
+// and fragment operations needed for the next program have to be re-added.)
+static void gl_sc_generate(struct gl_shader_cache *sc,
+ enum ra_renderpass_type type,
+ const struct ra_format *target_format,
+ const struct ra_renderpass_input *vao,
+ int vao_len, size_t vertex_stride)
+{
+ int glsl_version = sc->ra->glsl_version;
+ int glsl_es = sc->ra->glsl_es ? glsl_version : 0;
+
+ sc->params.type = type;
+
+ // gl_sc_reset() must be called after ending the previous render process,
+ // and before starting a new one.
+ assert(!sc->needs_reset);
+ sc->needs_reset = true;
+
+ // If using a UBO, pick a binding (needed for shader generation)
+ if (sc->ubo_size)
+ sc->ubo_binding = gl_sc_next_binding(sc, RA_VARTYPE_BUF_RO);
+
+ for (int n = 0; n < MP_ARRAY_SIZE(sc->tmp); n++)
+ sc->tmp[n].len = 0;
+
+ // set up shader text (header + uniforms + body)
+ bstr *header = &sc->tmp[0];
+ ADD(header, "#version %d%s\n", glsl_version, glsl_es >= 300 ? " es" : "");
+ if (type == RA_RENDERPASS_TYPE_COMPUTE) {
+ // This extension cannot be enabled in fragment shaders, so enable it
+ // only as an exception for compute shaders.
+ ADD(header, "#extension GL_ARB_compute_shader : enable\n");
+ }
+ for (int n = 0; n < sc->num_exts; n++)
+ ADD(header, "#extension %s : enable\n", sc->exts[n]);
+ if (glsl_es) {
+ ADD(header, "#ifdef GL_FRAGMENT_PRECISION_HIGH\n");
+ ADD(header, "precision highp float;\n");
+ ADD(header, "#else\n");
+ ADD(header, "precision mediump float;\n");
+ ADD(header, "#endif\n");
+
+ ADD(header, "precision mediump sampler2D;\n");
+ if (sc->ra->caps & RA_CAP_TEX_3D)
+ ADD(header, "precision mediump sampler3D;\n");
+ }
+
+ if (glsl_version >= 130) {
+ ADD(header, "#define tex1D texture\n");
+ ADD(header, "#define tex3D texture\n");
+ } else {
+ ADD(header, "#define tex1D texture1D\n");
+ ADD(header, "#define tex3D texture3D\n");
+ ADD(header, "#define texture texture2D\n");
+ }
+
+ // Additional helpers.
+ ADD(header, "#define LUT_POS(x, lut_size)"
+ " mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n");
+
+ char *vert_in = glsl_version >= 130 ? "in" : "attribute";
+ char *vert_out = glsl_version >= 130 ? "out" : "varying";
+ char *frag_in = glsl_version >= 130 ? "in" : "varying";
+
+ struct bstr *vert = NULL, *frag = NULL, *comp = NULL;
+
+ if (type == RA_RENDERPASS_TYPE_RASTER) {
+ // vertex shader: nothing interesting happens there, so just set up a
+ // dummy shader, which passes through the vertex array attributes.
+ bstr *vert_head = &sc->tmp[1];
+ ADD_BSTR(vert_head, *header);
+ bstr *vert_body = &sc->tmp[2];
+ ADD(vert_body, "void main() {\n");
+ bstr *frag_vaos = &sc->tmp[3];
+ for (int n = 0; n < vao_len; n++) {
+ const struct ra_renderpass_input *e = &vao[n];
+ const char *glsl_type = vao_glsl_type(e);
+ char loc[32] = {0};
+ if (sc->ra->glsl_vulkan)
+ snprintf(loc, sizeof(loc), "layout(location=%d) ", n);
+ if (strcmp(e->name, "position") == 0) {
+ // setting the raster position requires writing the gl_Position magic variable
+ assert(e->dim_v == 2 && e->type == RA_VARTYPE_FLOAT);
+ ADD(vert_head, "%s%s vec2 vertex_position;\n", loc, vert_in);
+ ADD(vert_body, "gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
+ } else {
+ ADD(vert_head, "%s%s %s vertex_%s;\n", loc, vert_in, glsl_type, e->name);
+ ADD(vert_head, "%s%s %s %s;\n", loc, vert_out, glsl_type, e->name);
+ ADD(vert_body, "%s = vertex_%s;\n", e->name, e->name);
+ ADD(frag_vaos, "%s%s %s %s;\n", loc, frag_in, glsl_type, e->name);
+ }
+ }
+ ADD(vert_body, "}\n");
+ vert = vert_head;
+ ADD_BSTR(vert, *vert_body);
+
+ // fragment shader; still requires adding used uniforms and VAO elements
+ frag = &sc->tmp[4];
+ ADD_BSTR(frag, *header);
+ if (glsl_version >= 130) {
+ ADD(frag, "%sout vec4 out_color;\n",
+ sc->ra->glsl_vulkan ? "layout(location=0) " : "");
+ }
+ ADD_BSTR(frag, *frag_vaos);
+ add_uniforms(sc, frag);
+
+ ADD_BSTR(frag, sc->prelude_text);
+ ADD_BSTR(frag, sc->header_text);
+
+ ADD(frag, "void main() {\n");
+ // we require _all_ frag shaders to write to a "vec4 color"
+ ADD(frag, "vec4 color = vec4(0.0, 0.0, 0.0, 1.0);\n");
+ ADD_BSTR(frag, sc->text);
+ if (glsl_version >= 130) {
+ ADD(frag, "out_color = color;\n");
+ } else {
+ ADD(frag, "gl_FragColor = color;\n");
+ }
+ ADD(frag, "}\n");
+
+ // We need to fix the format of the render dst at renderpass creation
+ // time
+ assert(target_format);
+ sc->params.target_format = target_format;
+ }
+
+ if (type == RA_RENDERPASS_TYPE_COMPUTE) {
+ comp = &sc->tmp[4];
+ ADD_BSTR(comp, *header);
+
+ add_uniforms(sc, comp);
+
+ ADD_BSTR(comp, sc->prelude_text);
+ ADD_BSTR(comp, sc->header_text);
+
+ ADD(comp, "void main() {\n");
+ ADD(comp, "vec4 color = vec4(0.0, 0.0, 0.0, 1.0);\n"); // convenience
+ ADD_BSTR(comp, sc->text);
+ ADD(comp, "}\n");
+ }
+
+ bstr *hash_total = &sc->tmp[5];
+
+ ADD(hash_total, "type %d\n", sc->params.type);
+
+ if (frag) {
+ ADD_BSTR(hash_total, *frag);
+ sc->params.frag_shader = frag->start;
+ }
+ ADD(hash_total, "\n");
+ if (vert) {
+ ADD_BSTR(hash_total, *vert);
+ sc->params.vertex_shader = vert->start;
+ }
+ ADD(hash_total, "\n");
+ if (comp) {
+ ADD_BSTR(hash_total, *comp);
+ sc->params.compute_shader = comp->start;
+ }
+ ADD(hash_total, "\n");
+
+ if (sc->params.enable_blend) {
+ ADD(hash_total, "blend %d %d %d %d\n",
+ sc->params.blend_src_rgb, sc->params.blend_dst_rgb,
+ sc->params.blend_src_alpha, sc->params.blend_dst_alpha);
+ }
+
+ if (sc->params.target_format)
+ ADD(hash_total, "format %s\n", sc->params.target_format->name);
+
+ struct sc_entry *entry = NULL;
+ for (int n = 0; n < sc->num_entries; n++) {
+ struct sc_entry *cur = sc->entries[n];
+ if (bstr_equals(cur->total, *hash_total)) {
+ entry = cur;
+ break;
+ }
+ }
+ if (!entry) {
+ if (sc->num_entries == SC_MAX_ENTRIES)
+ sc_flush_cache(sc);
+ entry = talloc_ptrtype(NULL, entry);
+ *entry = (struct sc_entry){
+ .total = bstrdup(entry, *hash_total),
+ .timer = timer_pool_create(sc->ra),
+ };
+
+ // The vertex shader uses mangled names for the vertex attributes, so
+ // that the fragment shader can use the "real" names. But the vertex
+ // attributes passed to the renderpass have to match the vertex
+ // shader's (mangled) input names, at least with older GLSL targets
+ // for GL, so mangle them here as well.
+ sc->params.vertex_stride = vertex_stride;
+ for (int n = 0; n < vao_len; n++) {
+ struct ra_renderpass_input attrib = vao[n];
+ attrib.name = talloc_asprintf(entry, "vertex_%s", attrib.name);
+ MP_TARRAY_APPEND(sc, sc->params.vertex_attribs,
+ sc->params.num_vertex_attribs, attrib);
+ }
+
+ for (int n = 0; n < sc->num_uniforms; n++) {
+ struct sc_cached_uniform u = {0};
+ if (sc->uniforms[n].type == SC_UNIFORM_TYPE_GLOBAL) {
+ // global uniforms need to be made visible to the ra_renderpass
+ u.index = sc->params.num_inputs;
+ MP_TARRAY_APPEND(sc, sc->params.inputs, sc->params.num_inputs,
+ sc->uniforms[n].input);
+ }
+ MP_TARRAY_APPEND(entry, entry->cached_uniforms,
+ entry->num_cached_uniforms, u);
+ }
+ if (!create_pass(sc, entry))
+ sc->error_state = true;
+ MP_TARRAY_APPEND(sc, sc->entries, sc->num_entries, entry);
+ }
+
+ if (!entry->pass) {
+ sc->current_shader = NULL;
+ return;
+ }
+
+ assert(sc->num_uniforms == entry->num_cached_uniforms);
+
+ sc->num_values = 0;
+ for (int n = 0; n < sc->num_uniforms; n++)
+ update_uniform(sc, entry, &sc->uniforms[n], n);
+
+ // If we're using a UBO, make sure to bind it as well
+ if (sc->ubo_size) {
+ struct ra_renderpass_input_val ubo_val = {
+ .index = entry->ubo_index,
+ .data = &entry->ubo,
+ };
+ MP_TARRAY_APPEND(sc, sc->values, sc->num_values, ubo_val);
+ }
+
+ sc->current_shader = entry;
+}
+
+struct mp_pass_perf gl_sc_dispatch_draw(struct gl_shader_cache *sc,
+ struct ra_tex *target, bool discard,
+ const struct ra_renderpass_input *vao,
+ int vao_len, size_t vertex_stride,
+ void *vertices, size_t num_vertices)
+{
+ struct timer_pool *timer = NULL;
+
+ sc->params.invalidate_target = discard;
+ gl_sc_generate(sc, RA_RENDERPASS_TYPE_RASTER, target->params.format,
+ vao, vao_len, vertex_stride);
+ if (!sc->current_shader)
+ goto error;
+
+ timer = sc->current_shader->timer;
+
+ struct mp_rect full_rc = {0, 0, target->params.w, target->params.h};
+
+ struct ra_renderpass_run_params run = {
+ .pass = sc->current_shader->pass,
+ .values = sc->values,
+ .num_values = sc->num_values,
+ .push_constants = sc->current_shader->pushc,
+ .target = target,
+ .vertex_data = vertices,
+ .vertex_count = num_vertices,
+ .viewport = full_rc,
+ .scissors = full_rc,
+ };
+
+ timer_pool_start(timer);
+ sc->ra->fns->renderpass_run(sc->ra, &run);
+ timer_pool_stop(timer);
+
+error:
+ gl_sc_reset(sc);
+ return timer_pool_measure(timer);
+}
+
+struct mp_pass_perf gl_sc_dispatch_compute(struct gl_shader_cache *sc,
+ int w, int h, int d)
+{
+ struct timer_pool *timer = NULL;
+
+ gl_sc_generate(sc, RA_RENDERPASS_TYPE_COMPUTE, NULL, NULL, 0, 0);
+ if (!sc->current_shader)
+ goto error;
+
+ timer = sc->current_shader->timer;
+
+ struct ra_renderpass_run_params run = {
+ .pass = sc->current_shader->pass,
+ .values = sc->values,
+ .num_values = sc->num_values,
+ .push_constants = sc->current_shader->pushc,
+ .compute_groups = {w, h, d},
+ };
+
+ timer_pool_start(timer);
+ sc->ra->fns->renderpass_run(sc->ra, &run);
+ timer_pool_stop(timer);
+
+error:
+ gl_sc_reset(sc);
+ return timer_pool_measure(timer);
+}
diff --git a/video/out/gpu/shader_cache.h b/video/out/gpu/shader_cache.h
new file mode 100644
index 0000000..7c51c7a
--- /dev/null
+++ b/video/out/gpu/shader_cache.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#include "common/common.h"
+#include "misc/bstr.h"
+#include "ra.h"
+
+// For mp_pass_perf
+#include "video/out/vo.h"
+
+struct mp_log;
+struct mpv_global;
+struct gl_shader_cache;
+
+struct gl_shader_cache *gl_sc_create(struct ra *ra, struct mpv_global *global,
+ struct mp_log *log);
+void gl_sc_destroy(struct gl_shader_cache *sc);
+bool gl_sc_error_state(struct gl_shader_cache *sc);
+void gl_sc_reset_error(struct gl_shader_cache *sc);
+void gl_sc_add(struct gl_shader_cache *sc, const char *text);
+void gl_sc_addf(struct gl_shader_cache *sc, const char *textf, ...)
+ PRINTF_ATTRIBUTE(2, 3);
+void gl_sc_hadd(struct gl_shader_cache *sc, const char *text);
+void gl_sc_haddf(struct gl_shader_cache *sc, const char *textf, ...)
+ PRINTF_ATTRIBUTE(2, 3);
+void gl_sc_hadd_bstr(struct gl_shader_cache *sc, struct bstr text);
+void gl_sc_paddf(struct gl_shader_cache *sc, const char *textf, ...)
+ PRINTF_ATTRIBUTE(2, 3);
+
+// A hint that the next data-type (i.e. non-binding) uniform is expected to
+// change frequently. This refers to the _f, _i, _vecN etc. uniform types.
+void gl_sc_uniform_dynamic(struct gl_shader_cache *sc);
+void gl_sc_uniform_texture(struct gl_shader_cache *sc, char *name,
+ struct ra_tex *tex);
+void gl_sc_uniform_image2D_wo(struct gl_shader_cache *sc, const char *name,
+ struct ra_tex *tex);
+void gl_sc_ssbo(struct gl_shader_cache *sc, char *name, struct ra_buf *buf,
+ char *format, ...) PRINTF_ATTRIBUTE(4, 5);
+void gl_sc_uniform_f(struct gl_shader_cache *sc, char *name, float f);
+void gl_sc_uniform_i(struct gl_shader_cache *sc, char *name, int f);
+void gl_sc_uniform_vec2(struct gl_shader_cache *sc, char *name, float f[2]);
+void gl_sc_uniform_vec3(struct gl_shader_cache *sc, char *name, float f[3]);
+void gl_sc_uniform_mat2(struct gl_shader_cache *sc, char *name,
+ bool transpose, float *v);
+void gl_sc_uniform_mat3(struct gl_shader_cache *sc, char *name,
+ bool transpose, float *v);
+
+// Return the correct bvecN() variant for using mix() in this GLSL version
+const char *gl_sc_bvec(struct gl_shader_cache *sc, int dims);
+
+void gl_sc_blend(struct gl_shader_cache *sc,
+ enum ra_blend blend_src_rgb,
+ enum ra_blend blend_dst_rgb,
+ enum ra_blend blend_src_alpha,
+ enum ra_blend blend_dst_alpha);
+void gl_sc_enable_extension(struct gl_shader_cache *sc, char *name);
+struct mp_pass_perf gl_sc_dispatch_draw(struct gl_shader_cache *sc,
+ struct ra_tex *target, bool discard,
+ const struct ra_renderpass_input *vao,
+ int vao_len, size_t vertex_stride,
+ void *ptr, size_t num);
+struct mp_pass_perf gl_sc_dispatch_compute(struct gl_shader_cache *sc,
+ int w, int h, int d);
+// The application can call this on errors, to reset the current shader. This
+// is normally done implicitly by gl_sc_dispatch_*
+void gl_sc_reset(struct gl_shader_cache *sc);
+void gl_sc_set_cache_dir(struct gl_shader_cache *sc, char *dir);
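The declarations above map onto a fixed per-pass workflow: add fragment text, set uniforms, dispatch, and let the cache reset itself. Below is a minimal sketch of a single raster pass, assuming the usual mpv headers are available; draw_plain_copy, the "image0"/"gain" uniform names and the texture handles are placeholders invented for the example:

    static void draw_plain_copy(struct gl_shader_cache *sc, struct ra_tex *src,
                                struct ra_tex *target)
    {
        gl_sc_uniform_texture(sc, "image0", src);  // becomes a sampler2D
        gl_sc_uniform_f(sc, "gain", 1.0f);         // plain scalar uniform
        // The added text becomes the body of main(); "color" is pre-declared.
        gl_sc_add(sc, "color = gain * texture(image0, gl_FragCoord.xy /\n"
                      "        vec2(textureSize(image0, 0)));\n");

        struct vertex_pt { float x, y; };
        struct vertex_pt quad[6] = {
            {-1, -1}, {1, -1}, {-1, 1},
            { 1, -1}, {1,  1}, {-1, 1},
        };
        const struct ra_renderpass_input vao[] = {
            {.name = "position", .type = RA_VARTYPE_FLOAT, .dim_v = 2,
             .dim_m = 1, .offset = offsetof(struct vertex_pt, x)},
        };
        gl_sc_dispatch_draw(sc, target, true, vao, 1,
                            sizeof(struct vertex_pt), quad, 6);
    }

gl_sc_dispatch_draw() implicitly calls gl_sc_reset() afterwards, so the next pass starts from a clean slate.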
diff --git a/video/out/gpu/spirv.c b/video/out/gpu/spirv.c
new file mode 100644
index 0000000..67088bc
--- /dev/null
+++ b/video/out/gpu/spirv.c
@@ -0,0 +1,70 @@
+#include "common/msg.h"
+#include "options/m_config.h"
+
+#include "spirv.h"
+#include "config.h"
+
+extern const struct spirv_compiler_fns spirv_shaderc;
+
+// in probe-order
+enum {
+ SPIRV_AUTO = 0,
+ SPIRV_SHADERC, // generally preferred, but not packaged everywhere
+};
+
+static const struct spirv_compiler_fns *compilers[] = {
+#if HAVE_SHADERC
+ [SPIRV_SHADERC] = &spirv_shaderc,
+#endif
+};
+
+static const struct m_opt_choice_alternatives compiler_choices[] = {
+ {"auto", SPIRV_AUTO},
+#if HAVE_SHADERC
+ {"shaderc", SPIRV_SHADERC},
+#endif
+ {0}
+};
+
+struct spirv_opts {
+ int compiler;
+};
+
+#define OPT_BASE_STRUCT struct spirv_opts
+const struct m_sub_options spirv_conf = {
+ .opts = (const struct m_option[]) {
+ {"spirv-compiler", OPT_CHOICE_C(compiler, compiler_choices)},
+ {0}
+ },
+ .size = sizeof(struct spirv_opts),
+};
+
+bool spirv_compiler_init(struct ra_ctx *ctx)
+{
+ void *tmp = talloc_new(NULL);
+ struct spirv_opts *opts = mp_get_config_group(tmp, ctx->global, &spirv_conf);
+ int compiler = opts->compiler;
+ talloc_free(tmp);
+
+ for (int i = SPIRV_AUTO+1; i < MP_ARRAY_SIZE(compilers); i++) {
+ if (compiler != SPIRV_AUTO && i != compiler)
+ continue;
+ if (!compilers[i])
+ continue;
+
+ ctx->spirv = talloc_zero(ctx, struct spirv_compiler);
+ ctx->spirv->log = ctx->log;
+ ctx->spirv->fns = compilers[i];
+
+ const char *name = m_opt_choice_str(compiler_choices, i);
+ strncpy(ctx->spirv->name, name, sizeof(ctx->spirv->name) - 1);
+ MP_VERBOSE(ctx, "Initializing SPIR-V compiler '%s'\n", name);
+ if (ctx->spirv->fns->init(ctx))
+ return true;
+ talloc_free(ctx->spirv);
+ ctx->spirv = NULL;
+ }
+
+ MP_ERR(ctx, "Failed initializing SPIR-V compiler!\n");
+ return false;
+}
diff --git a/video/out/gpu/spirv.h b/video/out/gpu/spirv.h
new file mode 100644
index 0000000..e3dbd4f
--- /dev/null
+++ b/video/out/gpu/spirv.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include "common/msg.h"
+#include "common/common.h"
+#include "context.h"
+
+enum glsl_shader {
+ GLSL_SHADER_VERTEX,
+ GLSL_SHADER_FRAGMENT,
+ GLSL_SHADER_COMPUTE,
+};
+
+#define SPIRV_NAME_MAX_LEN 32
+
+struct spirv_compiler {
+ char name[SPIRV_NAME_MAX_LEN];
+ const struct spirv_compiler_fns *fns;
+ struct mp_log *log;
+ void *priv;
+
+ const char *required_ext; // or NULL
+ int glsl_version; // GLSL version supported
+ int compiler_version; // for cache invalidation, may be left as 0
+ int ra_caps; // RA_CAP_* provided by this implementation, if any
+};
+
+struct spirv_compiler_fns {
+ // Compile GLSL to SPIR-V, under GL_KHR_vulkan_glsl semantics.
+ bool (*compile_glsl)(struct spirv_compiler *spirv, void *tactx,
+ enum glsl_shader type, const char *glsl,
+ struct bstr *out_spirv);
+
+ // Called by spirv_compiler_init / ra_ctx_destroy. These don't need to
+ // allocate/free ctx->spirv; that is done by the caller.
+ bool (*init)(struct ra_ctx *ctx);
+ void (*uninit)(struct ra_ctx *ctx); // optional
+};
+
+// Initializes ctx->spirv to a valid SPIR-V compiler, or returns false on
+// failure. Cleanup will be handled by ra_ctx_destroy.
+bool spirv_compiler_init(struct ra_ctx *ctx);
diff --git a/video/out/gpu/spirv_shaderc.c b/video/out/gpu/spirv_shaderc.c
new file mode 100644
index 0000000..f285631
--- /dev/null
+++ b/video/out/gpu/spirv_shaderc.c
@@ -0,0 +1,125 @@
+#include "common/msg.h"
+
+#include "context.h"
+#include "spirv.h"
+
+#include <shaderc/shaderc.h>
+
+struct priv {
+ shaderc_compiler_t compiler;
+ shaderc_compile_options_t opts;
+};
+
+static void shaderc_uninit(struct ra_ctx *ctx)
+{
+ struct priv *p = ctx->spirv->priv;
+ if (!p)
+ return;
+
+ shaderc_compile_options_release(p->opts);
+ shaderc_compiler_release(p->compiler);
+}
+
+static bool shaderc_init(struct ra_ctx *ctx)
+{
+ struct priv *p = ctx->spirv->priv = talloc_zero(ctx->spirv, struct priv);
+
+ p->compiler = shaderc_compiler_initialize();
+ if (!p->compiler)
+ goto error;
+ p->opts = shaderc_compile_options_initialize();
+ if (!p->opts)
+ goto error;
+
+ shaderc_compile_options_set_optimization_level(p->opts,
+ shaderc_optimization_level_performance);
+ if (ctx->opts.debug)
+ shaderc_compile_options_set_generate_debug_info(p->opts);
+
+ int ver, rev;
+ shaderc_get_spv_version(&ver, &rev);
+ ctx->spirv->compiler_version = ver * 100 + rev; // forwards compatibility
+ ctx->spirv->glsl_version = 450; // impossible to query?
+ return true;
+
+error:
+ shaderc_uninit(ctx);
+ return false;
+}
+
+static shaderc_compilation_result_t compile(struct priv *p,
+ enum glsl_shader type,
+ const char *glsl, bool debug)
+{
+ static const shaderc_shader_kind kinds[] = {
+ [GLSL_SHADER_VERTEX] = shaderc_glsl_vertex_shader,
+ [GLSL_SHADER_FRAGMENT] = shaderc_glsl_fragment_shader,
+ [GLSL_SHADER_COMPUTE] = shaderc_glsl_compute_shader,
+ };
+
+ if (debug) {
+ return shaderc_compile_into_spv_assembly(p->compiler, glsl, strlen(glsl),
+ kinds[type], "input", "main", p->opts);
+ } else {
+ return shaderc_compile_into_spv(p->compiler, glsl, strlen(glsl),
+ kinds[type], "input", "main", p->opts);
+ }
+}
+
+static bool shaderc_compile(struct spirv_compiler *spirv, void *tactx,
+ enum glsl_shader type, const char *glsl,
+ struct bstr *out_spirv)
+{
+ struct priv *p = spirv->priv;
+
+ shaderc_compilation_result_t res = compile(p, type, glsl, false);
+ int errs = shaderc_result_get_num_errors(res),
+ warn = shaderc_result_get_num_warnings(res),
+ msgl = errs ? MSGL_ERR : warn ? MSGL_WARN : MSGL_V;
+
+ const char *msg = shaderc_result_get_error_message(res);
+ if (msg[0])
+ MP_MSG(spirv, msgl, "shaderc output:\n%s", msg);
+
+ int s = shaderc_result_get_compilation_status(res);
+ bool success = s == shaderc_compilation_status_success;
+
+ static const char *results[] = {
+ [shaderc_compilation_status_success] = "success",
+ [shaderc_compilation_status_invalid_stage] = "invalid stage",
+ [shaderc_compilation_status_compilation_error] = "error",
+ [shaderc_compilation_status_internal_error] = "internal error",
+ [shaderc_compilation_status_null_result_object] = "no result",
+ [shaderc_compilation_status_invalid_assembly] = "invalid assembly",
+ };
+
+ const char *status = s < MP_ARRAY_SIZE(results) ? results[s] : "unknown";
+ MP_MSG(spirv, msgl, "shaderc compile status '%s' (%d errors, %d warnings)\n",
+ status, errs, warn);
+
+ if (success) {
+ void *bytes = (void *) shaderc_result_get_bytes(res);
+ out_spirv->len = shaderc_result_get_length(res);
+ out_spirv->start = talloc_memdup(tactx, bytes, out_spirv->len);
+ }
+
+ // Also print the SPIR-V disassembly for debugging purposes. Unfortunately,
+ // there doesn't seem to be a way to get it other than compiling the shader
+ // a second time.
+ if (mp_msg_test(spirv->log, MSGL_TRACE)) {
+ shaderc_compilation_result_t dis = compile(p, type, glsl, true);
+ MP_TRACE(spirv, "Generated SPIR-V:\n%.*s",
+ (int)shaderc_result_get_length(dis),
+ shaderc_result_get_bytes(dis));
+ shaderc_result_release(dis);
+ }
+
+ shaderc_result_release(res);
+ return success;
+}
+
+const struct spirv_compiler_fns spirv_shaderc = {
+ .compile_glsl = shaderc_compile,
+ .init = shaderc_init,
+ .uninit = shaderc_uninit,
+};
diff --git a/video/out/gpu/user_shaders.c b/video/out/gpu/user_shaders.c
new file mode 100644
index 0000000..708de87
--- /dev/null
+++ b/video/out/gpu/user_shaders.c
@@ -0,0 +1,463 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include "common/msg.h"
+#include "misc/ctype.h"
+#include "user_shaders.h"
+
+static bool parse_rpn_szexpr(struct bstr line, struct szexp out[MAX_SZEXP_SIZE])
+{
+ int pos = 0;
+
+ while (line.len > 0) {
+ struct bstr word = bstr_strip(bstr_splitchar(line, &line, ' '));
+ if (word.len == 0)
+ continue;
+
+ if (pos >= MAX_SZEXP_SIZE)
+ return false;
+
+ struct szexp *exp = &out[pos++];
+
+ if (bstr_eatend0(&word, ".w") || bstr_eatend0(&word, ".width")) {
+ exp->tag = SZEXP_VAR_W;
+ exp->val.varname = word;
+ continue;
+ }
+
+ if (bstr_eatend0(&word, ".h") || bstr_eatend0(&word, ".height")) {
+ exp->tag = SZEXP_VAR_H;
+ exp->val.varname = word;
+ continue;
+ }
+
+ switch (word.start[0]) {
+ case '+': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_ADD; continue;
+ case '-': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_SUB; continue;
+ case '*': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_MUL; continue;
+ case '/': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_DIV; continue;
+ case '%': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_MOD; continue;
+ case '!': exp->tag = SZEXP_OP1; exp->val.op = SZEXP_OP_NOT; continue;
+ case '>': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_GT; continue;
+ case '<': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_LT; continue;
+ case '=': exp->tag = SZEXP_OP2; exp->val.op = SZEXP_OP_EQ; continue;
+ }
+
+ if (mp_isdigit(word.start[0])) {
+ exp->tag = SZEXP_CONST;
+ if (bstr_sscanf(word, "%f", &exp->val.cval) != 1)
+ return false;
+ continue;
+ }
+
+ // Some sort of illegal expression
+ return false;
+ }
+
+ return true;
+}
+
+// Returns whether successful. 'result' is left untouched on failure
+bool eval_szexpr(struct mp_log *log, void *priv,
+ bool (*lookup)(void *priv, struct bstr var, float size[2]),
+ struct szexp expr[MAX_SZEXP_SIZE], float *result)
+{
+ float stack[MAX_SZEXP_SIZE] = {0};
+ int idx = 0; // points to next element to push
+
+ for (int i = 0; i < MAX_SZEXP_SIZE; i++) {
+ switch (expr[i].tag) {
+ case SZEXP_END:
+ goto done;
+
+ case SZEXP_CONST:
+ // Since our SZEXPs are bounded by MAX_SZEXP_SIZE, it should be
+ // impossible to overflow the stack
+ assert(idx < MAX_SZEXP_SIZE);
+ stack[idx++] = expr[i].val.cval;
+ continue;
+
+ case SZEXP_OP1:
+ if (idx < 1) {
+ mp_warn(log, "Stack underflow in RPN expression!\n");
+ return false;
+ }
+
+ switch (expr[i].val.op) {
+ case SZEXP_OP_NOT: stack[idx-1] = !stack[idx-1]; break;
+ default: MP_ASSERT_UNREACHABLE();
+ }
+ continue;
+
+ case SZEXP_OP2:
+ if (idx < 2) {
+ mp_warn(log, "Stack underflow in RPN expression!\n");
+ return false;
+ }
+
+ // Pop the operands in reverse order
+ float op2 = stack[--idx];
+ float op1 = stack[--idx];
+ float res = 0.0;
+ switch (expr[i].val.op) {
+ case SZEXP_OP_ADD: res = op1 + op2; break;
+ case SZEXP_OP_SUB: res = op1 - op2; break;
+ case SZEXP_OP_MUL: res = op1 * op2; break;
+ case SZEXP_OP_DIV: res = op1 / op2; break;
+ case SZEXP_OP_MOD: res = fmodf(op1, op2); break;
+ case SZEXP_OP_GT: res = op1 > op2; break;
+ case SZEXP_OP_LT: res = op1 < op2; break;
+ case SZEXP_OP_EQ: res = op1 == op2; break;
+ default: MP_ASSERT_UNREACHABLE();
+ }
+
+ if (!isfinite(res)) {
+ mp_warn(log, "Illegal operation in RPN expression!\n");
+ return false;
+ }
+
+ stack[idx++] = res;
+ continue;
+
+ case SZEXP_VAR_W:
+ case SZEXP_VAR_H: {
+ struct bstr name = expr[i].val.varname;
+ float size[2];
+
+ if (!lookup(priv, name, size)) {
+ mp_warn(log, "Variable %.*s not found in RPN expression!\n",
+ BSTR_P(name));
+ return false;
+ }
+
+ stack[idx++] = (expr[i].tag == SZEXP_VAR_W) ? size[0] : size[1];
+ continue;
+ }
+ }
+ }
+
+done:
+ // Return the single stack element
+ if (idx != 1) {
+ mp_warn(log, "Malformed stack after RPN expression!\n");
+ return false;
+ }
+
+ *result = stack[0];
+ return true;
+}
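To make the stack discipline concrete: the size expressions fed into this evaluator are written in postfix (RPN) form, so an illustrative user-shader expression such as

    HOOKED.w 2 /

pushes the hooked texture's width, pushes the constant 2, and then divides, evaluating to half the input width; similarly "HOOKED.w HOOKED.h >" leaves 1.0 on the stack when the input is wider than it is tall. (The texture name is whatever the pass binds; "HOOKED" is just the conventional default.)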
+
+static bool parse_hook(struct mp_log *log, struct bstr *body,
+ struct gl_user_shader_hook *out)
+{
+ *out = (struct gl_user_shader_hook){
+ .pass_desc = bstr0("(unknown)"),
+ .offset = identity_trans,
+ .align_offset = false,
+ .width = {{ SZEXP_VAR_W, { .varname = bstr0("HOOKED") }}},
+ .height = {{ SZEXP_VAR_H, { .varname = bstr0("HOOKED") }}},
+ .cond = {{ SZEXP_CONST, { .cval = 1.0 }}},
+ };
+
+ int hook_idx = 0;
+ int bind_idx = 0;
+
+ // Parse all headers
+ while (true) {
+ struct bstr rest;
+ struct bstr line = bstr_strip(bstr_getline(*body, &rest));
+
+ // Check for the presence of the magic line beginning
+ if (!bstr_eatstart0(&line, "//!"))
+ break;
+
+ *body = rest;
+
+ // Parse the supported commands
+ if (bstr_eatstart0(&line, "HOOK")) {
+ if (hook_idx == SHADER_MAX_HOOKS) {
+ mp_err(log, "Passes may only hook up to %d textures!\n",
+ SHADER_MAX_HOOKS);
+ return false;
+ }
+ out->hook_tex[hook_idx++] = bstr_strip(line);
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "BIND")) {
+ if (bind_idx == SHADER_MAX_BINDS) {
+ mp_err(log, "Passes may only bind up to %d textures!\n",
+ SHADER_MAX_BINDS);
+ return false;
+ }
+ out->bind_tex[bind_idx++] = bstr_strip(line);
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "SAVE")) {
+ out->save_tex = bstr_strip(line);
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "DESC")) {
+ out->pass_desc = bstr_strip(line);
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "OFFSET")) {
+ line = bstr_strip(line);
+ if (bstr_equals0(line, "ALIGN")) {
+ out->align_offset = true;
+ } else {
+ float ox, oy;
+ if (bstr_sscanf(line, "%f %f", &ox, &oy) != 2) {
+ mp_err(log, "Error while parsing OFFSET!\n");
+ return false;
+ }
+ out->offset.t[0] = ox;
+ out->offset.t[1] = oy;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "WIDTH")) {
+ if (!parse_rpn_szexpr(line, out->width)) {
+ mp_err(log, "Error while parsing WIDTH!\n");
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "HEIGHT")) {
+ if (!parse_rpn_szexpr(line, out->height)) {
+ mp_err(log, "Error while parsing HEIGHT!\n");
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "WHEN")) {
+ if (!parse_rpn_szexpr(line, out->cond)) {
+ mp_err(log, "Error while parsing WHEN!\n");
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "COMPONENTS")) {
+ if (bstr_sscanf(line, "%d", &out->components) != 1) {
+ mp_err(log, "Error while parsing COMPONENTS!\n");
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "COMPUTE")) {
+ struct compute_info *ci = &out->compute;
+ int num = bstr_sscanf(line, "%d %d %d %d", &ci->block_w, &ci->block_h,
+ &ci->threads_w, &ci->threads_h);
+
+ if (num == 2 || num == 4) {
+ ci->active = true;
+ ci->directly_writes = true;
+ } else {
+ mp_err(log, "Error while parsing COMPUTE!\n");
+ return false;
+ }
+ continue;
+ }
+
+ // Unknown command type
+ mp_err(log, "Unrecognized command '%.*s'!\n", BSTR_P(line));
+ return false;
+ }
+
+ // The rest of the file up until the next magic line beginning (if any)
+ // shall be the shader body
+ if (bstr_split_tok(*body, "//!", &out->pass_body, body)) {
+ // Make sure the magic line is part of the rest
+ body->start -= 3;
+ body->len += 3;
+ }
+
+ // Sanity checking
+ if (hook_idx == 0)
+ mp_warn(log, "Pass has no hooked textures (will be ignored)!\n");
+
+ return true;
+}
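As a reference for the directives parsed above, here is a small illustrative user-shader pass; each //! line corresponds directly to one field of struct gl_user_shader_hook, and the hook() entry point plus the HOOKED_* helpers are supplied by the renderer when the pass body is executed:

    //!HOOK LUMA
    //!BIND HOOKED
    //!DESC example identity pass on luma
    //!WIDTH HOOKED.w
    //!HEIGHT HOOKED.h
    //!WHEN HOOKED.w 0 >
    //!COMPONENTS 1
    vec4 hook() {
        return HOOKED_texOff(vec2(0.0));
    }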
+
+static bool parse_tex(struct mp_log *log, struct ra *ra, struct bstr *body,
+ struct gl_user_shader_tex *out)
+{
+ *out = (struct gl_user_shader_tex){
+ .name = bstr0("USER_TEX"),
+ .params = {
+ .dimensions = 2,
+ .w = 1, .h = 1, .d = 1,
+ .render_src = true,
+ .src_linear = true,
+ },
+ };
+ struct ra_tex_params *p = &out->params;
+
+ while (true) {
+ struct bstr rest;
+ struct bstr line = bstr_strip(bstr_getline(*body, &rest));
+
+ if (!bstr_eatstart0(&line, "//!"))
+ break;
+
+ *body = rest;
+
+ if (bstr_eatstart0(&line, "TEXTURE")) {
+ out->name = bstr_strip(line);
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "SIZE")) {
+ p->dimensions = bstr_sscanf(line, "%d %d %d", &p->w, &p->h, &p->d);
+ if (p->dimensions < 1 || p->dimensions > 3 ||
+ p->w < 1 || p->h < 1 || p->d < 1)
+ {
+ mp_err(log, "Error while parsing SIZE!\n");
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "FORMAT ")) {
+ p->format = NULL;
+ for (int n = 0; n < ra->num_formats; n++) {
+ const struct ra_format *fmt = ra->formats[n];
+ if (bstr_equals0(line, fmt->name)) {
+ p->format = fmt;
+ break;
+ }
+ }
+ // (pixel_size==0 is for opaque formats)
+ if (!p->format || !p->format->pixel_size) {
+ mp_err(log, "Unrecognized/unavailable FORMAT name: '%.*s'!\n",
+ BSTR_P(line));
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "FILTER")) {
+ line = bstr_strip(line);
+ if (bstr_equals0(line, "LINEAR")) {
+ p->src_linear = true;
+ } else if (bstr_equals0(line, "NEAREST")) {
+ p->src_linear = false;
+ } else {
+ mp_err(log, "Unrecognized FILTER: '%.*s'!\n", BSTR_P(line));
+ return false;
+ }
+ continue;
+ }
+
+ if (bstr_eatstart0(&line, "BORDER")) {
+ line = bstr_strip(line);
+ if (bstr_equals0(line, "CLAMP")) {
+ p->src_repeat = false;
+ } else if (bstr_equals0(line, "REPEAT")) {
+ p->src_repeat = true;
+ } else {
+ mp_err(log, "Unrecognized BORDER: '%.*s'!\n", BSTR_P(line));
+ return false;
+ }
+ continue;
+ }
+
+ mp_err(log, "Unrecognized command '%.*s'!\n", BSTR_P(line));
+ return false;
+ }
+
+ if (!p->format) {
+ mp_err(log, "No FORMAT specified.\n");
+ return false;
+ }
+
+ if (p->src_linear && !p->format->linear_filter) {
+ mp_err(log, "The specified texture format cannot be filtered!\n");
+ return false;
+ }
+
+ // Decode the rest of the section (up to the next //! marker) as raw hex
+ // data for the texture
+ struct bstr hexdata;
+ if (bstr_split_tok(*body, "//!", &hexdata, body)) {
+ // Make sure the magic line is part of the rest
+ body->start -= 3;
+ body->len += 3;
+ }
+
+ struct bstr tex;
+ if (!bstr_decode_hex(NULL, bstr_strip(hexdata), &tex)) {
+ mp_err(log, "Error while parsing TEXTURE body: must be a valid "
+ "hexadecimal sequence, on a single line!\n");
+ return false;
+ }
+
+ int expected_len = p->w * p->h * p->d * p->format->pixel_size;
+ if (tex.len != expected_len) {
+ mp_err(log, "Shader TEXTURE size mismatch: got %zd bytes, expected %d!\n",
+ tex.len, expected_len);
+ talloc_free(tex.start);
+ return false;
+ }
+
+ p->initial_data = tex.start;
+ return true;
+}
+
+void parse_user_shader(struct mp_log *log, struct ra *ra, struct bstr shader,
+ void *priv,
+ bool (*dohook)(void *p, struct gl_user_shader_hook hook),
+ bool (*dotex)(void *p, struct gl_user_shader_tex tex))
+{
+ if (!dohook || !dotex || !shader.len)
+ return;
+
+ // Skip all garbage (e.g. comments) before the first header
+ int pos = bstr_find(shader, bstr0("//!"));
+ if (pos < 0) {
+ mp_warn(log, "Shader appears to contain no headers!\n");
+ return;
+ }
+ shader = bstr_cut(shader, pos);
+
+ // Loop over the file
+ while (shader.len > 0)
+ {
+ // Peek at the first header to dispatch the right type
+ if (bstr_startswith0(shader, "//!TEXTURE")) {
+ struct gl_user_shader_tex t;
+ if (!parse_tex(log, ra, &shader, &t) || !dotex(priv, t))
+ return;
+ continue;
+ }
+
+ struct gl_user_shader_hook h;
+ if (!parse_hook(log, &shader, &h) || !dohook(priv, h))
+ return;
+ }
+}
diff --git a/video/out/gpu/user_shaders.h b/video/out/gpu/user_shaders.h
new file mode 100644
index 0000000..4bb7c22
--- /dev/null
+++ b/video/out/gpu/user_shaders.h
@@ -0,0 +1,99 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MP_GL_USER_SHADERS_H
+#define MP_GL_USER_SHADERS_H
+
+#include "utils.h"
+#include "ra.h"
+
+#define SHADER_MAX_HOOKS 16
+#define SHADER_MAX_BINDS 16
+#define MAX_SZEXP_SIZE 32
+
+enum szexp_op {
+ SZEXP_OP_ADD,
+ SZEXP_OP_SUB,
+ SZEXP_OP_MUL,
+ SZEXP_OP_DIV,
+ SZEXP_OP_MOD,
+ SZEXP_OP_NOT,
+ SZEXP_OP_GT,
+ SZEXP_OP_LT,
+ SZEXP_OP_EQ,
+};
+
+enum szexp_tag {
+ SZEXP_END = 0, // End of an RPN expression
+ SZEXP_CONST, // Push a constant value onto the stack
+ SZEXP_VAR_W, // Get the width/height of a named texture (variable)
+ SZEXP_VAR_H,
+ SZEXP_OP2, // Pop two elements and push the result of a dyadic operation
+ SZEXP_OP1, // Pop one element and push the result of a monadic operation
+};
+
+struct szexp {
+ enum szexp_tag tag;
+ union {
+ float cval;
+ struct bstr varname;
+ enum szexp_op op;
+ } val;
+};
+
+struct compute_info {
+ bool active;
+ int block_w, block_h; // Block size (each block corresponds to one WG)
+ int threads_w, threads_h; // How many threads form a work group
+ bool directly_writes; // If true, shader is assumed to imageStore(out_image)
+};
+
+struct gl_user_shader_hook {
+ struct bstr pass_desc;
+ struct bstr hook_tex[SHADER_MAX_HOOKS];
+ struct bstr bind_tex[SHADER_MAX_BINDS];
+ struct bstr save_tex;
+ struct bstr pass_body;
+ struct gl_transform offset;
+ bool align_offset;
+ struct szexp width[MAX_SZEXP_SIZE];
+ struct szexp height[MAX_SZEXP_SIZE];
+ struct szexp cond[MAX_SZEXP_SIZE];
+ int components;
+ struct compute_info compute;
+};
+
+struct gl_user_shader_tex {
+ struct bstr name;
+ struct ra_tex_params params;
+ // for video.c
+ struct ra_tex *tex;
+};
+
+// Parse a user shader consisting of one or more shader blocks. The callbacks
+// are invoked on every valid shader block parsed.
+void parse_user_shader(struct mp_log *log, struct ra *ra, struct bstr shader,
+ void *priv,
+ bool (*dohook)(void *p, struct gl_user_shader_hook hook),
+ bool (*dotex)(void *p, struct gl_user_shader_tex tex));
+
+// Evaluate a szexp, given a lookup function for named textures
+bool eval_szexpr(struct mp_log *log, void *priv,
+ bool (*lookup)(void *priv, struct bstr var, float size[2]),
+ struct szexp expr[MAX_SZEXP_SIZE], float *result);
+
+#endif
diff --git a/video/out/gpu/utils.c b/video/out/gpu/utils.c
new file mode 100644
index 0000000..8a1aacf
--- /dev/null
+++ b/video/out/gpu/utils.c
@@ -0,0 +1,349 @@
+#include "common/msg.h"
+#include "video/out/vo.h"
+#include "utils.h"
+
+// Standard parallel 2D projection, except y1 < y0 means that the coordinate
+// system is flipped, not the projection.
+void gl_transform_ortho(struct gl_transform *t, float x0, float x1,
+ float y0, float y1)
+{
+ if (y1 < y0) {
+ float tmp = y0;
+ y0 = tmp - y1;
+ y1 = tmp;
+ }
+
+ t->m[0][0] = 2.0f / (x1 - x0);
+ t->m[0][1] = 0.0f;
+ t->m[1][0] = 0.0f;
+ t->m[1][1] = 2.0f / (y1 - y0);
+ t->t[0] = -(x1 + x0) / (x1 - x0);
+ t->t[1] = -(y1 + y0) / (y1 - y0);
+}
+
+// Apply the effects of one transformation to another, transforming it in the
+// process. In other words: post-composes t onto x
+void gl_transform_trans(struct gl_transform t, struct gl_transform *x)
+{
+ struct gl_transform xt = *x;
+ x->m[0][0] = t.m[0][0] * xt.m[0][0] + t.m[0][1] * xt.m[1][0];
+ x->m[1][0] = t.m[1][0] * xt.m[0][0] + t.m[1][1] * xt.m[1][0];
+ x->m[0][1] = t.m[0][0] * xt.m[0][1] + t.m[0][1] * xt.m[1][1];
+ x->m[1][1] = t.m[1][0] * xt.m[0][1] + t.m[1][1] * xt.m[1][1];
+ gl_transform_vec(t, &x->t[0], &x->t[1]);
+}
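A hedged sanity-check sketch (not part of this file) of what "post-composes t onto x" means in practice: applying the composed transform is the same as applying x first and t second. The numbers are arbitrary test values:

    static void transform_compose_example(void)
    {
        struct gl_transform a = {.m = {{2, 0}, {0, 2}}, .t = {1, 0}}; // scale 2, shift x
        struct gl_transform b = {.m = {{1, 0}, {0, 1}}, .t = {1, 1}}; // shift (1, 1)

        float px = 3, py = 4;
        gl_transform_vec(b, &px, &py);  // (4, 5)
        gl_transform_vec(a, &px, &py);  // (9, 10)

        gl_transform_trans(a, &b);      // b now represents "b, then a"
        float qx = 3, qy = 4;
        gl_transform_vec(b, &qx, &qy);  // also (9, 10)
        assert(px == qx && py == qy);
    }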
+
+void gl_transform_ortho_fbo(struct gl_transform *t, struct ra_fbo fbo)
+{
+ int y_dir = fbo.flip ? -1 : 1;
+ gl_transform_ortho(t, 0, fbo.tex->params.w, 0, fbo.tex->params.h * y_dir);
+}
+
+float gl_video_scale_ambient_lux(float lmin, float lmax,
+ float rmin, float rmax, float lux)
+{
+ assert(lmax > lmin);
+
+ float num = (rmax - rmin) * (log10(lux) - log10(lmin));
+ float den = log10(lmax) - log10(lmin);
+ float result = num / den + rmin;
+
+ // clamp the result
+ float max = MPMAX(rmax, rmin);
+ float min = MPMIN(rmax, rmin);
+ return MPMAX(MPMIN(result, max), min);
+}
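In other words, the ambient reading is interpolated on a log10 scale between lmin and lmax and mapped into the range rmin..rmax, then clamped. As a purely illustrative example: with lmin = 16, lmax = 256 and a target range of 1.0 to 1.2, a reading of 64 lux sits exactly halfway between the endpoints on the log scale (log10(64) is midway between log10(16) and log10(256)), so the function returns 1.1.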
+
+void ra_buf_pool_uninit(struct ra *ra, struct ra_buf_pool *pool)
+{
+ for (int i = 0; i < pool->num_buffers; i++)
+ ra_buf_free(ra, &pool->buffers[i]);
+
+ talloc_free(pool->buffers);
+ *pool = (struct ra_buf_pool){0};
+}
+
+static bool ra_buf_params_compatible(const struct ra_buf_params *new,
+ const struct ra_buf_params *old)
+{
+ return new->type == old->type &&
+ new->size <= old->size &&
+ new->host_mapped == old->host_mapped &&
+ new->host_mutable == old->host_mutable;
+}
+
+static bool ra_buf_pool_grow(struct ra *ra, struct ra_buf_pool *pool)
+{
+ struct ra_buf *buf = ra_buf_create(ra, &pool->current_params);
+ if (!buf)
+ return false;
+
+ MP_TARRAY_INSERT_AT(NULL, pool->buffers, pool->num_buffers, pool->index, buf);
+ MP_VERBOSE(ra, "Resized buffer pool of type %u to size %d\n",
+ pool->current_params.type, pool->num_buffers);
+ return true;
+}
+
+struct ra_buf *ra_buf_pool_get(struct ra *ra, struct ra_buf_pool *pool,
+ const struct ra_buf_params *params)
+{
+ assert(!params->initial_data);
+
+ if (!ra_buf_params_compatible(params, &pool->current_params)) {
+ ra_buf_pool_uninit(ra, pool);
+ pool->current_params = *params;
+ }
+
+ // Make sure we have at least one buffer available
+ if (!pool->buffers && !ra_buf_pool_grow(ra, pool))
+ return NULL;
+
+ // Make sure the next buffer is available for use
+ if (!ra->fns->buf_poll(ra, pool->buffers[pool->index]) &&
+ !ra_buf_pool_grow(ra, pool))
+ {
+ return NULL;
+ }
+
+ struct ra_buf *buf = pool->buffers[pool->index++];
+ pool->index %= pool->num_buffers;
+
+ return buf;
+}
+
+bool ra_tex_upload_pbo(struct ra *ra, struct ra_buf_pool *pbo,
+ const struct ra_tex_upload_params *params)
+{
+ if (params->buf)
+ return ra->fns->tex_upload(ra, params);
+
+ struct ra_tex *tex = params->tex;
+ size_t row_size = tex->params.dimensions == 2 ? params->stride :
+ tex->params.w * tex->params.format->pixel_size;
+
+ int height = tex->params.h;
+ if (tex->params.dimensions == 2 && params->rc)
+ height = mp_rect_h(*params->rc);
+
+ struct ra_buf_params bufparams = {
+ .type = RA_BUF_TYPE_TEX_UPLOAD,
+ .size = row_size * height * tex->params.d,
+ .host_mutable = true,
+ };
+
+ struct ra_buf *buf = ra_buf_pool_get(ra, pbo, &bufparams);
+ if (!buf)
+ return false;
+
+ ra->fns->buf_update(ra, buf, 0, params->src, bufparams.size);
+
+ struct ra_tex_upload_params newparams = *params;
+ newparams.buf = buf;
+ newparams.src = NULL;
+
+ return ra->fns->tex_upload(ra, &newparams);
+}
+
+struct ra_layout std140_layout(struct ra_renderpass_input *inp)
+{
+ size_t el_size = ra_vartype_size(inp->type);
+
+ // std140 packing rules:
+ // 1. The alignment of generic values is their size in bytes
+ // 2. The alignment of vectors is the vector length * the size of the base
+ // type, with the exception of vec3, which is always aligned like vec4
+ // 3. The alignment of arrays is that of the element size rounded up to
+ // the nearest multiple of vec4
+ // 4. Matrices are treated like arrays of vectors
+ // 5. Arrays/matrices are laid out with a stride equal to the alignment
+ size_t stride = el_size * inp->dim_v;
+ size_t align = stride;
+ if (inp->dim_v == 3)
+ align += el_size;
+ if (inp->dim_m > 1)
+ stride = align = MP_ALIGN_UP(stride, sizeof(float[4]));
+
+ return (struct ra_layout) {
+ .align = align,
+ .stride = stride,
+ .size = stride * inp->dim_m,
+ };
+}
+
+struct ra_layout std430_layout(struct ra_renderpass_input *inp)
+{
+ size_t el_size = ra_vartype_size(inp->type);
+
+ // std430 packing rules: like std140, except arrays/matrices are always
+ // "tightly" packed, even arrays/matrices of vec3s
+ size_t stride = el_size * inp->dim_v;
+ size_t align = stride;
+ if (inp->dim_v == 3 && inp->dim_m == 1)
+ align += el_size;
+
+ return (struct ra_layout) {
+ .align = align,
+ .stride = stride,
+ .size = stride * inp->dim_m,
+ };
+}
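A worked comparison of the two layouts, assuming 4-byte float elements: a vec3 (dim_v = 3, dim_m = 1) gets stride 12 but alignment 16 under both functions, so a scalar placed after it starts at offset 16; a mat3 (dim_v = 3, dim_m = 3) has its column stride rounded up to 16 under std140 (size 48), while std430 keeps the tight 12-byte stride (size 36), which is exactly the "tightly packed" difference the comment above refers to.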
+
+// Resize a texture to a new desired size and format if necessary
+bool ra_tex_resize(struct ra *ra, struct mp_log *log, struct ra_tex **tex,
+ int w, int h, const struct ra_format *fmt)
+{
+ if (*tex) {
+ struct ra_tex_params cur_params = (*tex)->params;
+ if (cur_params.w == w && cur_params.h == h && cur_params.format == fmt)
+ return true;
+ }
+
+ mp_dbg(log, "Resizing texture: %dx%d\n", w, h);
+
+ if (!fmt || !fmt->renderable || !fmt->linear_filter) {
+ mp_err(log, "Format %s not supported.\n", fmt ? fmt->name : "(unset)");
+ return false;
+ }
+
+ ra_tex_free(ra, tex);
+ struct ra_tex_params params = {
+ .dimensions = 2,
+ .w = w,
+ .h = h,
+ .d = 1,
+ .format = fmt,
+ .src_linear = true,
+ .render_src = true,
+ .render_dst = true,
+ .storage_dst = fmt->storable,
+ .blit_src = true,
+ };
+
+ *tex = ra_tex_create(ra, &params);
+ if (!*tex)
+ mp_err(log, "Error: texture could not be created.\n");
+
+ return *tex;
+}
+
+struct timer_pool {
+ struct ra *ra;
+ ra_timer *timer;
+ bool running; // detect invalid usage
+
+ uint64_t samples[VO_PERF_SAMPLE_COUNT];
+ int sample_idx;
+ int sample_count;
+
+ uint64_t sum;
+ uint64_t peak;
+};
+
+struct timer_pool *timer_pool_create(struct ra *ra)
+{
+ if (!ra->fns->timer_create)
+ return NULL;
+
+ ra_timer *timer = ra->fns->timer_create(ra);
+ if (!timer)
+ return NULL;
+
+ struct timer_pool *pool = talloc(NULL, struct timer_pool);
+ if (!pool) {
+ ra->fns->timer_destroy(ra, timer);
+ return NULL;
+ }
+
+ *pool = (struct timer_pool){ .ra = ra, .timer = timer };
+ return pool;
+}
+
+void timer_pool_destroy(struct timer_pool *pool)
+{
+ if (!pool)
+ return;
+
+ pool->ra->fns->timer_destroy(pool->ra, pool->timer);
+ talloc_free(pool);
+}
+
+void timer_pool_start(struct timer_pool *pool)
+{
+ if (!pool)
+ return;
+
+ assert(!pool->running);
+ pool->ra->fns->timer_start(pool->ra, pool->timer);
+ pool->running = true;
+}
+
+void timer_pool_stop(struct timer_pool *pool)
+{
+ if (!pool)
+ return;
+
+ assert(pool->running);
+ uint64_t res = pool->ra->fns->timer_stop(pool->ra, pool->timer);
+ pool->running = false;
+
+ if (res) {
+ // Insert res into the ring buffer and grab the value it replaces
+ uint64_t old = pool->samples[pool->sample_idx];
+ pool->sample_count = MPMIN(pool->sample_count + 1, VO_PERF_SAMPLE_COUNT);
+ pool->samples[pool->sample_idx++] = res;
+ pool->sample_idx %= VO_PERF_SAMPLE_COUNT;
+ pool->sum = pool->sum + res - old;
+
+ // Update peak if necessary
+ if (res >= pool->peak) {
+ pool->peak = res;
+ } else if (pool->peak == old) {
+ // It's possible that the last peak was the value we just removed;
+ // if so, we need to scan for the new peak
+ uint64_t peak = res;
+ for (int i = 0; i < VO_PERF_SAMPLE_COUNT; i++)
+ peak = MPMAX(peak, pool->samples[i]);
+ pool->peak = peak;
+ }
+ }
+}
+
+struct mp_pass_perf timer_pool_measure(struct timer_pool *pool)
+{
+ if (!pool)
+ return (struct mp_pass_perf){0};
+
+ struct mp_pass_perf res = {
+ .peak = pool->peak,
+ .count = pool->sample_count,
+ };
+
+ int idx = pool->sample_idx - pool->sample_count + VO_PERF_SAMPLE_COUNT;
+ for (int i = 0; i < res.count; i++) {
+ idx %= VO_PERF_SAMPLE_COUNT;
+ res.samples[i] = pool->samples[idx++];
+ }
+
+ if (res.count > 0) {
+ res.last = res.samples[res.count - 1];
+ res.avg = pool->sum / res.count;
+ }
+
+ return res;
+}
+
+void mp_log_source(struct mp_log *log, int lev, const char *src)
+{
+ int line = 1;
+ if (!src)
+ return;
+ while (*src) {
+ const char *end = strchr(src, '\n');
+ const char *next;
+ if (end) {
+ next = end + 1;
+ } else {
+ next = end = src + strlen(src);
+ }
+ mp_msg(log, lev, "[%3d] %.*s\n", line, (int)(end - src), src);
+ line++;
+ src = next;
+ }
+}
diff --git a/video/out/gpu/utils.h b/video/out/gpu/utils.h
new file mode 100644
index 0000000..215873e
--- /dev/null
+++ b/video/out/gpu/utils.h
@@ -0,0 +1,108 @@
+#pragma once
+
+#include <stdbool.h>
+#include <math.h>
+
+#include "ra.h"
+#include "context.h"
+
+// A 3x2 matrix, with the translation part separate.
+struct gl_transform {
+ // row-major, e.g. in mathematical notation:
+ // | m[0][0] m[0][1] |
+ // | m[1][0] m[1][1] |
+ float m[2][2];
+ float t[2];
+};
+
+static const struct gl_transform identity_trans = {
+ .m = {{1.0, 0.0}, {0.0, 1.0}},
+ .t = {0.0, 0.0},
+};
+
+void gl_transform_ortho(struct gl_transform *t, float x0, float x1,
+ float y0, float y1);
+
+// This treats the transform as affine: the translation part t[n] gets added
+// to the output.
+static inline void gl_transform_vec(struct gl_transform t, float *x, float *y)
+{
+ float vx = *x, vy = *y;
+ *x = vx * t.m[0][0] + vy * t.m[0][1] + t.t[0];
+ *y = vx * t.m[1][0] + vy * t.m[1][1] + t.t[1];
+}
+
+struct mp_rect_f {
+ float x0, y0, x1, y1;
+};
+
+// Semantic equality (fuzzy comparison)
+static inline bool mp_rect_f_seq(struct mp_rect_f a, struct mp_rect_f b)
+{
+ return fabs(a.x0 - b.x0) < 1e-6 && fabs(a.x1 - b.x1) < 1e-6 &&
+ fabs(a.y0 - b.y0) < 1e-6 && fabs(a.y1 - b.y1) < 1e-6;
+}
+
+static inline void gl_transform_rect(struct gl_transform t, struct mp_rect_f *r)
+{
+ gl_transform_vec(t, &r->x0, &r->y0);
+ gl_transform_vec(t, &r->x1, &r->y1);
+}
+
+static inline bool gl_transform_eq(struct gl_transform a, struct gl_transform b)
+{
+ for (int x = 0; x < 2; x++) {
+ for (int y = 0; y < 2; y++) {
+ if (a.m[x][y] != b.m[x][y])
+ return false;
+ }
+ }
+
+ return a.t[0] == b.t[0] && a.t[1] == b.t[1];
+}
+
+void gl_transform_trans(struct gl_transform t, struct gl_transform *x);
+
+void gl_transform_ortho_fbo(struct gl_transform *t, struct ra_fbo fbo);
+
+float gl_video_scale_ambient_lux(float lmin, float lmax,
+ float rmin, float rmax, float lux);
+
+// A pool of buffers, which can grow as needed
+struct ra_buf_pool {
+ struct ra_buf_params current_params;
+ struct ra_buf **buffers;
+ int num_buffers;
+ int index;
+};
+
+void ra_buf_pool_uninit(struct ra *ra, struct ra_buf_pool *pool);
+
+// Note: params->initial_data is *not* supported
+struct ra_buf *ra_buf_pool_get(struct ra *ra, struct ra_buf_pool *pool,
+ const struct ra_buf_params *params);
+
+// Helper that wraps ra_tex_upload using texture upload buffers to ensure that
+// params->buf is always set. This is intended for RA-internal usage.
+bool ra_tex_upload_pbo(struct ra *ra, struct ra_buf_pool *pbo,
+ const struct ra_tex_upload_params *params);
+
+// Layout rules for GLSL's packing modes
+struct ra_layout std140_layout(struct ra_renderpass_input *inp);
+struct ra_layout std430_layout(struct ra_renderpass_input *inp);
+
+bool ra_tex_resize(struct ra *ra, struct mp_log *log, struct ra_tex **tex,
+ int w, int h, const struct ra_format *fmt);
+
+// A wrapper around ra_timer that does result pooling, averaging etc.
+struct timer_pool;
+
+struct timer_pool *timer_pool_create(struct ra *ra);
+void timer_pool_destroy(struct timer_pool *pool);
+void timer_pool_start(struct timer_pool *pool);
+void timer_pool_stop(struct timer_pool *pool);
+struct mp_pass_perf timer_pool_measure(struct timer_pool *pool);
+
+// Print a multi-line string with line numbers (e.g. for shader sources)
+// log, lev: module and log level, as in mp_msg()
+void mp_log_source(struct mp_log *log, int lev, const char *src);
diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
new file mode 100644
index 0000000..852ee78
--- /dev/null
+++ b/video/out/gpu/video.c
@@ -0,0 +1,4364 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <libavutil/common.h>
+#include <libavutil/lfg.h>
+
+#include "video.h"
+
+#include "misc/bstr.h"
+#include "options/m_config.h"
+#include "options/path.h"
+#include "common/global.h"
+#include "options/options.h"
+#include "utils.h"
+#include "hwdec.h"
+#include "osd.h"
+#include "ra.h"
+#include "stream/stream.h"
+#include "video_shaders.h"
+#include "user_shaders.h"
+#include "error_diffusion.h"
+#include "video/out/filter_kernels.h"
+#include "video/out/aspect.h"
+#include "video/out/dither.h"
+#include "video/out/vo.h"
+
+// scale/cscale arguments that map directly to shader filter routines.
+// Note that the convolution filters are not included in this list.
+static const char *const fixed_scale_filters[] = {
+ "bilinear",
+ "bicubic_fast",
+ "oversample",
+ NULL
+};
+static const char *const fixed_tscale_filters[] = {
+ "oversample",
+ "linear",
+ NULL
+};
+
+// must be sorted, and terminated with 0
+int filter_sizes[] =
+ {2, 4, 6, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 0};
+int tscale_sizes[] = {2, 4, 6, 8, 0};
+
+struct vertex_pt {
+ float x, y;
+};
+
+struct texplane {
+ struct ra_tex *tex;
+ int w, h;
+ bool flipped;
+};
+
+struct video_image {
+ struct texplane planes[4];
+ struct mp_image *mpi; // original input image
+ uint64_t id; // unique ID identifying mpi contents
+ bool hwdec_mapped;
+};
+
+enum plane_type {
+ PLANE_NONE = 0,
+ PLANE_RGB,
+ PLANE_LUMA,
+ PLANE_CHROMA,
+ PLANE_ALPHA,
+ PLANE_XYZ,
+};
+
+static const char *plane_names[] = {
+ [PLANE_NONE] = "unknown",
+ [PLANE_RGB] = "rgb",
+ [PLANE_LUMA] = "luma",
+ [PLANE_CHROMA] = "chroma",
+ [PLANE_ALPHA] = "alpha",
+ [PLANE_XYZ] = "xyz",
+};
+
+// A self-contained description of a source image which can be bound to a
+// texture unit and sampled from. Contains metadata about how it's to be used
+struct image {
+ enum plane_type type; // must be set to something non-zero
+ int components; // number of relevant coordinates
+ float multiplier; // multiplier to be used when sampling
+ struct ra_tex *tex;
+ int w, h; // logical size (after transformation)
+ struct gl_transform transform; // rendering transformation
+ int padding; // number of leading padding components (e.g. 2 = rg is padding)
+};
+
+// A named image, for user scripting purposes
+struct saved_img {
+ const char *name;
+ struct image img;
+};
+
+// A texture hook. This is some operation that transforms a named texture as
+// soon as it's generated
+struct tex_hook {
+ const char *save_tex;
+ const char *hook_tex[SHADER_MAX_HOOKS];
+ const char *bind_tex[SHADER_MAX_BINDS];
+ int components; // how many components are relevant (0 = same as input)
+ bool align_offset; // whether to align hooked tex with reference.
+ void *priv; // this gets talloc_freed when the tex_hook is removed
+ void (*hook)(struct gl_video *p, struct image img, // generates GLSL
+ struct gl_transform *trans, void *priv);
+ bool (*cond)(struct gl_video *p, struct image img, void *priv);
+};
+
+struct surface {
+ struct ra_tex *tex;
+ uint64_t id;
+ double pts;
+};
+
+#define SURFACES_MAX 10
+
+struct cached_file {
+ char *path;
+ struct bstr body;
+};
+
+struct pass_info {
+ struct bstr desc;
+ struct mp_pass_perf perf;
+};
+
+struct dr_buffer {
+ struct ra_buf *buf;
+ // The mpi reference will keep the data from being recycled (or from other
+ // references gaining write access) while the GPU is accessing the buffer.
+ struct mp_image *mpi;
+};
+
+struct gl_video {
+ struct ra *ra;
+
+ struct mpv_global *global;
+ struct mp_log *log;
+ struct gl_video_opts opts;
+ struct m_config_cache *opts_cache;
+ struct gl_lcms *cms;
+
+ int fb_depth; // actual bits available in GL main framebuffer
+ struct m_color clear_color;
+ bool force_clear_color;
+
+ struct gl_shader_cache *sc;
+
+ struct osd_state *osd_state;
+ struct mpgl_osd *osd;
+ double osd_pts;
+
+ struct ra_tex *lut_3d_texture;
+ bool use_lut_3d;
+ int lut_3d_size[3];
+
+ struct ra_tex *dither_texture;
+
+ struct mp_image_params real_image_params; // configured format
+ struct mp_image_params image_params; // texture format (mind hwdec case)
+ struct ra_imgfmt_desc ra_format; // texture format
+ int plane_count;
+
+ bool is_gray;
+ bool has_alpha;
+ char color_swizzle[5];
+ bool use_integer_conversion;
+
+ struct video_image image;
+
+ struct dr_buffer *dr_buffers;
+ int num_dr_buffers;
+
+ bool using_dr_path;
+
+ bool dumb_mode;
+ bool forced_dumb_mode;
+
+ // Cached vertex array, to avoid re-allocation per frame. For simplicity,
+ // our vertex format is just a list of `vertex_pt`s, which greatly
+ // simplifies offset calculation at the cost of (unneeded) flexibility.
+ struct vertex_pt *tmp_vertex;
+ struct ra_renderpass_input *vao;
+ int vao_len;
+
+ const struct ra_format *fbo_format;
+ struct ra_tex *merge_tex[4];
+ struct ra_tex *scale_tex[4];
+ struct ra_tex *integer_tex[4];
+ struct ra_tex *indirect_tex;
+ struct ra_tex *blend_subs_tex;
+ struct ra_tex *error_diffusion_tex[2];
+ struct ra_tex *screen_tex;
+ struct ra_tex *output_tex;
+ struct ra_tex **hook_textures;
+ int num_hook_textures;
+ int idx_hook_textures;
+
+ struct ra_buf *hdr_peak_ssbo;
+ struct surface surfaces[SURFACES_MAX];
+
+ // user pass descriptions and textures
+ struct tex_hook *tex_hooks;
+ int num_tex_hooks;
+ struct gl_user_shader_tex *user_textures;
+ int num_user_textures;
+
+ int surface_idx;
+ int surface_now;
+ int frames_drawn;
+ bool is_interpolated;
+ bool output_tex_valid;
+
+ // state for configured scalers
+ struct scaler scaler[SCALER_COUNT];
+
+ struct mp_csp_equalizer_state *video_eq;
+
+ struct mp_rect src_rect; // displayed part of the source video
+ struct mp_rect dst_rect; // video rectangle on output window
+ struct mp_osd_res osd_rect; // OSD size/margins
+
+ // temporary during rendering
+ struct compute_info pass_compute; // compute shader metadata for this pass
+ struct image *pass_imgs; // bound images for this pass
+ int num_pass_imgs;
+ struct saved_img *saved_imgs; // saved (named) images for this frame
+ int num_saved_imgs;
+
+ // effective current texture metadata - this will essentially affect the
+ // next render pass target, as well as implicitly tracking what needs to
+ // be done with the image
+ int texture_w, texture_h;
+ struct gl_transform texture_offset; // texture transform without rotation
+ int components;
+ bool use_linear;
+ float user_gamma;
+
+ // pass info / metrics
+ struct pass_info pass_fresh[VO_PASS_PERF_MAX];
+ struct pass_info pass_redraw[VO_PASS_PERF_MAX];
+ struct pass_info *pass;
+ int pass_idx;
+ struct timer_pool *upload_timer;
+ struct timer_pool *blit_timer;
+ struct timer_pool *osd_timer;
+
+ int frames_uploaded;
+ int frames_rendered;
+ AVLFG lfg;
+
+ // Cached because computing it can take relatively long
+ int last_dither_matrix_size;
+ float *last_dither_matrix;
+
+ struct cached_file *files;
+ int num_files;
+
+ struct ra_hwdec_ctx hwdec_ctx;
+ struct ra_hwdec_mapper *hwdec_mapper;
+ struct ra_hwdec *hwdec_overlay;
+ bool hwdec_active;
+
+ bool dsi_warned;
+ bool broken_frame; // temporary error state
+
+ bool colorspace_override_warned;
+ bool correct_downscaling_warned;
+};
+
+static const struct gl_video_opts gl_video_opts_def = {
+ .dither_algo = DITHER_FRUIT,
+ .dither_size = 6,
+ .temporal_dither_period = 1,
+ .error_diffusion = "sierra-lite",
+ .fbo_format = "auto",
+ .sigmoid_center = 0.75,
+ .sigmoid_slope = 6.5,
+ .scaler = {
+ {{"lanczos", .params={NAN, NAN}}, {.params = {NAN, NAN}}}, // scale
+ {{"hermite", .params={NAN, NAN}}, {.params = {NAN, NAN}}}, // dscale
+ {{NULL, .params={NAN, NAN}}, {.params = {NAN, NAN}}}, // cscale
+ {{"oversample", .params={NAN, NAN}}, {.params = {NAN, NAN}}}, // tscale
+ },
+ .scaler_resizes_only = true,
+ .correct_downscaling = true,
+ .linear_downscaling = true,
+ .sigmoid_upscaling = true,
+ .interpolation_threshold = 0.01,
+ .alpha_mode = ALPHA_BLEND_TILES,
+ .background = {0, 0, 0, 255},
+ .gamma = 1.0f,
+ .tone_map = {
+ .curve = TONE_MAPPING_AUTO,
+ .curve_param = NAN,
+ .max_boost = 1.0,
+ .decay_rate = 20.0,
+ .scene_threshold_low = 1.0,
+ .scene_threshold_high = 3.0,
+ .contrast_smoothness = 3.5,
+ },
+ .early_flush = -1,
+ .shader_cache = true,
+ .hwdec_interop = "auto",
+};
+
+static int validate_scaler_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value);
+
+static int validate_window_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value);
+
+static int validate_error_diffusion_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value);
+
+#define OPT_BASE_STRUCT struct gl_video_opts
+
+// Use for options which use NAN for defaults.
+#define OPT_FLOATDEF(field) \
+ OPT_FLOAT(field), \
+ .flags = M_OPT_DEFAULT_NAN
+
+#define SCALER_OPTS(n, i) \
+ {n, OPT_STRING_VALIDATE(scaler[i].kernel.name, validate_scaler_opt)}, \
+ {n"-param1", OPT_FLOATDEF(scaler[i].kernel.params[0])}, \
+ {n"-param2", OPT_FLOATDEF(scaler[i].kernel.params[1])}, \
+ {n"-blur", OPT_FLOAT(scaler[i].kernel.blur)}, \
+ {n"-cutoff", OPT_REMOVED("Hard-coded as 0.001")}, \
+ {n"-taper", OPT_FLOAT(scaler[i].kernel.taper), M_RANGE(0.0, 1.0)}, \
+ {n"-wparam", OPT_FLOATDEF(scaler[i].window.params[0])}, \
+ {n"-wblur", OPT_REMOVED("Just adjust filter radius directly")}, \
+ {n"-wtaper", OPT_FLOAT(scaler[i].window.taper), M_RANGE(0.0, 1.0)}, \
+ {n"-clamp", OPT_FLOAT(scaler[i].clamp), M_RANGE(0.0, 1.0)}, \
+ {n"-radius", OPT_FLOAT(scaler[i].radius), M_RANGE(0.5, 16.0)}, \
+ {n"-antiring", OPT_FLOAT(scaler[i].antiring), M_RANGE(0.0, 1.0)}, \
+ {n"-window", OPT_STRING_VALIDATE(scaler[i].window.name, validate_window_opt)}
+
+const struct m_sub_options gl_video_conf = {
+ .opts = (const m_option_t[]) {
+ {"gpu-dumb-mode", OPT_CHOICE(dumb_mode,
+ {"auto", 0}, {"yes", 1}, {"no", -1})},
+ {"gamma-factor", OPT_FLOAT(gamma), M_RANGE(0.1, 2.0),
+ .deprecation_message = "no replacement"},
+ {"gamma-auto", OPT_BOOL(gamma_auto),
+ .deprecation_message = "no replacement"},
+ {"target-prim", OPT_CHOICE_C(target_prim, mp_csp_prim_names)},
+ {"target-trc", OPT_CHOICE_C(target_trc, mp_csp_trc_names)},
+ {"target-peak", OPT_CHOICE(target_peak, {"auto", 0}),
+ M_RANGE(10, 10000)},
+ {"target-contrast", OPT_CHOICE(target_contrast, {"auto", 0}, {"inf", -1}),
+ M_RANGE(10, 1000000)},
+ {"target-gamut", OPT_CHOICE_C(target_gamut, mp_csp_prim_names)},
+ {"tone-mapping", OPT_CHOICE(tone_map.curve,
+ {"auto", TONE_MAPPING_AUTO},
+ {"clip", TONE_MAPPING_CLIP},
+ {"mobius", TONE_MAPPING_MOBIUS},
+ {"reinhard", TONE_MAPPING_REINHARD},
+ {"hable", TONE_MAPPING_HABLE},
+ {"gamma", TONE_MAPPING_GAMMA},
+ {"linear", TONE_MAPPING_LINEAR},
+ {"spline", TONE_MAPPING_SPLINE},
+ {"bt.2390", TONE_MAPPING_BT_2390},
+ {"bt.2446a", TONE_MAPPING_BT_2446A},
+ {"st2094-40", TONE_MAPPING_ST2094_40},
+ {"st2094-10", TONE_MAPPING_ST2094_10})},
+ {"tone-mapping-param", OPT_FLOATDEF(tone_map.curve_param)},
+ {"inverse-tone-mapping", OPT_BOOL(tone_map.inverse)},
+ {"tone-mapping-max-boost", OPT_FLOAT(tone_map.max_boost),
+ M_RANGE(1.0, 10.0)},
+ {"tone-mapping-visualize", OPT_BOOL(tone_map.visualize)},
+ {"gamut-mapping-mode", OPT_CHOICE(tone_map.gamut_mode,
+ {"auto", GAMUT_AUTO},
+ {"clip", GAMUT_CLIP},
+ {"perceptual", GAMUT_PERCEPTUAL},
+ {"relative", GAMUT_RELATIVE},
+ {"saturation", GAMUT_SATURATION},
+ {"absolute", GAMUT_ABSOLUTE},
+ {"desaturate", GAMUT_DESATURATE},
+ {"darken", GAMUT_DARKEN},
+ {"warn", GAMUT_WARN},
+ {"linear", GAMUT_LINEAR})},
+ {"hdr-compute-peak", OPT_CHOICE(tone_map.compute_peak,
+ {"auto", 0},
+ {"yes", 1},
+ {"no", -1})},
+ {"hdr-peak-percentile", OPT_FLOAT(tone_map.peak_percentile),
+ M_RANGE(0.0, 100.0)},
+ {"hdr-peak-decay-rate", OPT_FLOAT(tone_map.decay_rate),
+ M_RANGE(0.0, 1000.0)},
+ {"hdr-scene-threshold-low", OPT_FLOAT(tone_map.scene_threshold_low),
+ M_RANGE(0, 20.0)},
+ {"hdr-scene-threshold-high", OPT_FLOAT(tone_map.scene_threshold_high),
+ M_RANGE(0, 20.0)},
+ {"hdr-contrast-recovery", OPT_FLOAT(tone_map.contrast_recovery),
+ M_RANGE(0, 2.0)},
+ {"hdr-contrast-smoothness", OPT_FLOAT(tone_map.contrast_smoothness),
+ M_RANGE(1.0, 100.0)},
+ {"opengl-pbo", OPT_BOOL(pbo)},
+ SCALER_OPTS("scale", SCALER_SCALE),
+ SCALER_OPTS("dscale", SCALER_DSCALE),
+ SCALER_OPTS("cscale", SCALER_CSCALE),
+ SCALER_OPTS("tscale", SCALER_TSCALE),
+ {"scaler-lut-size", OPT_REMOVED("hard-coded as 8")},
+ {"scaler-resizes-only", OPT_BOOL(scaler_resizes_only)},
+ {"correct-downscaling", OPT_BOOL(correct_downscaling)},
+ {"linear-downscaling", OPT_BOOL(linear_downscaling)},
+ {"linear-upscaling", OPT_BOOL(linear_upscaling)},
+ {"sigmoid-upscaling", OPT_BOOL(sigmoid_upscaling)},
+ {"sigmoid-center", OPT_FLOAT(sigmoid_center), M_RANGE(0.0, 1.0)},
+ {"sigmoid-slope", OPT_FLOAT(sigmoid_slope), M_RANGE(1.0, 20.0)},
+ {"fbo-format", OPT_STRING(fbo_format)},
+ {"dither-depth", OPT_CHOICE(dither_depth, {"no", -1}, {"auto", 0}),
+ M_RANGE(-1, 16)},
+ {"dither", OPT_CHOICE(dither_algo,
+ {"fruit", DITHER_FRUIT},
+ {"ordered", DITHER_ORDERED},
+ {"error-diffusion", DITHER_ERROR_DIFFUSION},
+ {"no", DITHER_NONE})},
+ {"dither-size-fruit", OPT_INT(dither_size), M_RANGE(2, 8)},
+ {"temporal-dither", OPT_BOOL(temporal_dither)},
+ {"temporal-dither-period", OPT_INT(temporal_dither_period),
+ M_RANGE(1, 128)},
+ {"error-diffusion",
+ OPT_STRING_VALIDATE(error_diffusion, validate_error_diffusion_opt)},
+ {"alpha", OPT_CHOICE(alpha_mode,
+ {"no", ALPHA_NO},
+ {"yes", ALPHA_YES},
+ {"blend", ALPHA_BLEND},
+ {"blend-tiles", ALPHA_BLEND_TILES})},
+ {"opengl-rectangle-textures", OPT_BOOL(use_rectangle)},
+ {"background", OPT_COLOR(background)},
+ {"interpolation", OPT_BOOL(interpolation)},
+ {"interpolation-threshold", OPT_FLOAT(interpolation_threshold)},
+ {"blend-subtitles", OPT_CHOICE(blend_subs,
+ {"no", BLEND_SUBS_NO},
+ {"yes", BLEND_SUBS_YES},
+ {"video", BLEND_SUBS_VIDEO})},
+ {"glsl-shaders", OPT_PATHLIST(user_shaders), .flags = M_OPT_FILE},
+ {"glsl-shader", OPT_CLI_ALIAS("glsl-shaders-append")},
+ {"glsl-shader-opts", OPT_KEYVALUELIST(user_shader_opts)},
+ {"deband", OPT_BOOL(deband)},
+ {"deband", OPT_SUBSTRUCT(deband_opts, deband_conf)},
+ {"sharpen", OPT_FLOAT(unsharp)},
+ {"gpu-tex-pad-x", OPT_INT(tex_pad_x), M_RANGE(0, 4096)},
+ {"gpu-tex-pad-y", OPT_INT(tex_pad_y), M_RANGE(0, 4096)},
+ {"", OPT_SUBSTRUCT(icc_opts, mp_icc_conf)},
+ {"gpu-shader-cache", OPT_BOOL(shader_cache)},
+ {"gpu-shader-cache-dir", OPT_STRING(shader_cache_dir), .flags = M_OPT_FILE},
+ {"gpu-hwdec-interop",
+ OPT_STRING_VALIDATE(hwdec_interop, ra_hwdec_validate_opt)},
+ {"gamut-warning", OPT_REMOVED("Replaced by --gamut-mapping-mode=warn")},
+ {"gamut-clipping", OPT_REMOVED("Replaced by --gamut-mapping-mode=desaturate")},
+ {"tone-mapping-desaturate", OPT_REMOVED("Replaced by --tone-mapping-mode")},
+ {"tone-mapping-desaturate-exponent", OPT_REMOVED("Replaced by --tone-mapping-mode")},
+ {"tone-mapping-crosstalk", OPT_REMOVED("Hard-coded as 0.04")},
+ {"tone-mapping-mode", OPT_REMOVED("no replacement")},
+ {0}
+ },
+ .size = sizeof(struct gl_video_opts),
+ .defaults = &gl_video_opts_def,
+};
+
+static void uninit_rendering(struct gl_video *p);
+static void uninit_scaler(struct gl_video *p, struct scaler *scaler);
+static void check_gl_features(struct gl_video *p);
+static bool pass_upload_image(struct gl_video *p, struct mp_image *mpi, uint64_t id);
+static const char *handle_scaler_opt(const char *name, bool tscale);
+static void reinit_from_options(struct gl_video *p);
+static void get_scale_factors(struct gl_video *p, bool transpose_rot, double xy[2]);
+static void gl_video_setup_hooks(struct gl_video *p);
+static void gl_video_update_options(struct gl_video *p);
+
+#define GLSL(x) gl_sc_add(p->sc, #x "\n");
+#define GLSLF(...) gl_sc_addf(p->sc, __VA_ARGS__)
+#define GLSLHF(...) gl_sc_haddf(p->sc, __VA_ARGS__)
+#define PRELUDE(...) gl_sc_paddf(p->sc, __VA_ARGS__)
+
+static struct bstr load_cached_file(struct gl_video *p, const char *path)
+{
+ if (!path || !path[0])
+ return (struct bstr){0};
+ for (int n = 0; n < p->num_files; n++) {
+ if (strcmp(p->files[n].path, path) == 0)
+ return p->files[n].body;
+ }
+ // not found -> load it
+ char *fname = mp_get_user_path(NULL, p->global, path);
+ struct bstr s = stream_read_file(fname, p, p->global, 1000000000); // 1GB
+ talloc_free(fname);
+ if (s.len) {
+ struct cached_file new = {
+ .path = talloc_strdup(p, path),
+ .body = s,
+ };
+ MP_TARRAY_APPEND(p, p->files, p->num_files, new);
+ return new.body;
+ }
+ return (struct bstr){0};
+}
+
+static void debug_check_gl(struct gl_video *p, const char *msg)
+{
+ if (p->ra->fns->debug_marker)
+ p->ra->fns->debug_marker(p->ra, msg);
+}
+
+static void gl_video_reset_surfaces(struct gl_video *p)
+{
+ for (int i = 0; i < SURFACES_MAX; i++) {
+ p->surfaces[i].id = 0;
+ p->surfaces[i].pts = MP_NOPTS_VALUE;
+ }
+ p->surface_idx = 0;
+ p->surface_now = 0;
+ p->frames_drawn = 0;
+ p->output_tex_valid = false;
+}
+
+static void gl_video_reset_hooks(struct gl_video *p)
+{
+ for (int i = 0; i < p->num_tex_hooks; i++)
+ talloc_free(p->tex_hooks[i].priv);
+
+ for (int i = 0; i < p->num_user_textures; i++)
+ ra_tex_free(p->ra, &p->user_textures[i].tex);
+
+ p->num_tex_hooks = 0;
+ p->num_user_textures = 0;
+}
+
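+// Wrap an index into the valid range [0, SURFACES_MAX), handling negative
+// values as well, e.g. surface_wrap(-1) == SURFACES_MAX - 1.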
+static inline int surface_wrap(int id)
+{
+ id = id % SURFACES_MAX;
+ return id < 0 ? id + SURFACES_MAX : id;
+}
+
+static void reinit_osd(struct gl_video *p)
+{
+ mpgl_osd_destroy(p->osd);
+ p->osd = NULL;
+ if (p->osd_state)
+ p->osd = mpgl_osd_init(p->ra, p->log, p->osd_state);
+}
+
+static void uninit_rendering(struct gl_video *p)
+{
+ for (int n = 0; n < SCALER_COUNT; n++)
+ uninit_scaler(p, &p->scaler[n]);
+
+ ra_tex_free(p->ra, &p->dither_texture);
+
+ for (int n = 0; n < 4; n++) {
+ ra_tex_free(p->ra, &p->merge_tex[n]);
+ ra_tex_free(p->ra, &p->scale_tex[n]);
+ ra_tex_free(p->ra, &p->integer_tex[n]);
+ }
+
+ ra_tex_free(p->ra, &p->indirect_tex);
+ ra_tex_free(p->ra, &p->blend_subs_tex);
+ ra_tex_free(p->ra, &p->screen_tex);
+ ra_tex_free(p->ra, &p->output_tex);
+
+ for (int n = 0; n < 2; n++)
+ ra_tex_free(p->ra, &p->error_diffusion_tex[n]);
+
+ for (int n = 0; n < SURFACES_MAX; n++)
+ ra_tex_free(p->ra, &p->surfaces[n].tex);
+
+ for (int n = 0; n < p->num_hook_textures; n++)
+ ra_tex_free(p->ra, &p->hook_textures[n]);
+
+ gl_video_reset_surfaces(p);
+ gl_video_reset_hooks(p);
+
+ gl_sc_reset_error(p->sc);
+}
+
+bool gl_video_gamma_auto_enabled(struct gl_video *p)
+{
+ return p->opts.gamma_auto;
+}
+
+struct mp_colorspace gl_video_get_output_colorspace(struct gl_video *p)
+{
+ return (struct mp_colorspace) {
+ .primaries = p->opts.target_prim,
+ .gamma = p->opts.target_trc,
+ .hdr.max_luma = p->opts.target_peak,
+ };
+}
+
+// Warning: icc_data.start must point to a ta allocation, and the function
+// takes over ownership.
+void gl_video_set_icc_profile(struct gl_video *p, bstr icc_data)
+{
+ if (gl_lcms_set_memory_profile(p->cms, icc_data))
+ reinit_from_options(p);
+}
+
+bool gl_video_icc_auto_enabled(struct gl_video *p)
+{
+ return p->opts.icc_opts ? p->opts.icc_opts->profile_auto : false;
+}
+
+static bool gl_video_get_lut3d(struct gl_video *p, enum mp_csp_prim prim,
+ enum mp_csp_trc trc)
+{
+ if (!p->use_lut_3d)
+ return false;
+
+ struct AVBufferRef *icc = NULL;
+ if (p->image.mpi)
+ icc = p->image.mpi->icc_profile;
+
+ if (p->lut_3d_texture && !gl_lcms_has_changed(p->cms, prim, trc, icc))
+ return true;
+
+ // GLES3 doesn't provide filtered 16 bit integer textures
+ // GLES2 doesn't even provide 3D textures
+ const struct ra_format *fmt = ra_find_unorm_format(p->ra, 2, 4);
+ if (!fmt || !(p->ra->caps & RA_CAP_TEX_3D)) {
+ p->use_lut_3d = false;
+ MP_WARN(p, "Disabling color management (no RGBA16 3D textures).\n");
+ return false;
+ }
+
+ struct lut3d *lut3d = NULL;
+ if (!fmt || !gl_lcms_get_lut3d(p->cms, &lut3d, prim, trc, icc) || !lut3d) {
+ p->use_lut_3d = false;
+ return false;
+ }
+
+ ra_tex_free(p->ra, &p->lut_3d_texture);
+
+ struct ra_tex_params params = {
+ .dimensions = 3,
+ .w = lut3d->size[0],
+ .h = lut3d->size[1],
+ .d = lut3d->size[2],
+ .format = fmt,
+ .render_src = true,
+ .src_linear = true,
+ .initial_data = lut3d->data,
+ };
+ p->lut_3d_texture = ra_tex_create(p->ra, &params);
+
+ debug_check_gl(p, "after 3d lut creation");
+
+ for (int i = 0; i < 3; i++)
+ p->lut_3d_size[i] = lut3d->size[i];
+
+ talloc_free(lut3d);
+
+ if (!p->lut_3d_texture) {
+ p->use_lut_3d = false;
+ return false;
+ }
+
+ return true;
+}
+
+// Fill an image struct from a ra_tex + some metadata
+static struct image image_wrap(struct ra_tex *tex, enum plane_type type,
+ int components)
+{
+ assert(type != PLANE_NONE);
+ return (struct image){
+ .type = type,
+ .tex = tex,
+ .multiplier = 1.0,
+ .w = tex ? tex->params.w : 1,
+ .h = tex ? tex->params.h : 1,
+ .transform = identity_trans,
+ .components = components,
+ };
+}
+
+// Bind an image to a free texture unit and return its ID.
+static int pass_bind(struct gl_video *p, struct image img)
+{
+ int idx = p->num_pass_imgs;
+ MP_TARRAY_APPEND(p, p->pass_imgs, p->num_pass_imgs, img);
+ return idx;
+}
+
+// Rotation by 90° and flipping.
+// w/h is used for recentering.
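+// e.g. rotate == 90 selects a == 1 below, giving m = {{0, 1}, {-1, 0}}; the
+// recentering then shifts the rotated image back into the positive quadrant.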
+static void get_transform(float w, float h, int rotate, bool flip,
+ struct gl_transform *out_tr)
+{
+ int a = rotate % 90 ? 0 : rotate / 90;
+ int sin90[4] = {0, 1, 0, -1}; // just to avoid rounding issues etc.
+ int cos90[4] = {1, 0, -1, 0};
+ struct gl_transform tr = {{{ cos90[a], sin90[a]},
+ {-sin90[a], cos90[a]}}};
+
+ // basically, recenter to keep the whole image in view
+ float b[2] = {1, 1};
+ gl_transform_vec(tr, &b[0], &b[1]);
+ tr.t[0] += b[0] < 0 ? w : 0;
+ tr.t[1] += b[1] < 0 ? h : 0;
+
+ if (flip) {
+ struct gl_transform fliptr = {{{1, 0}, {0, -1}}, {0, h}};
+ gl_transform_trans(fliptr, &tr);
+ }
+
+ *out_tr = tr;
+}
+
+// Return the chroma plane size upscaled to luma size, but with additional
+// padding for image sizes not aligned to subsampling.
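+// e.g. chroma_upsize(1279, 2) == 1280, while chroma_upsize(1280, 2) == 1280.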
+static int chroma_upsize(int size, int pixel)
+{
+ return (size + pixel - 1) / pixel * pixel;
+}
+
+// If a and b are on the same plane, return what plane type should be used.
+// If a or b are none, the other type always wins.
+// Usually: LUMA/RGB/XYZ > CHROMA > ALPHA
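+// e.g. merge_plane_types(PLANE_ALPHA, PLANE_CHROMA) == PLANE_CHROMA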
+static enum plane_type merge_plane_types(enum plane_type a, enum plane_type b)
+{
+ if (a == PLANE_NONE)
+ return b;
+ if (b == PLANE_LUMA || b == PLANE_RGB || b == PLANE_XYZ)
+ return b;
+ if (b != PLANE_NONE && a == PLANE_ALPHA)
+ return b;
+ return a;
+}
+
+// Places a video_image's image textures + associated metadata into img[]. The
+// number of textures is equal to p->plane_count. Any necessary plane offsets
+// are stored in off. (e.g. chroma position)
+static void pass_get_images(struct gl_video *p, struct video_image *vimg,
+ struct image img[4], struct gl_transform off[4])
+{
+ assert(vimg->mpi);
+
+ int w = p->image_params.w;
+ int h = p->image_params.h;
+
+ // Determine the chroma offset
+ float ls_w = 1.0 / p->ra_format.chroma_w;
+ float ls_h = 1.0 / p->ra_format.chroma_h;
+
+ struct gl_transform chroma = {{{ls_w, 0.0}, {0.0, ls_h}}};
+
+ if (p->image_params.chroma_location != MP_CHROMA_CENTER) {
+ int cx, cy;
+ mp_get_chroma_location(p->image_params.chroma_location, &cx, &cy);
+ // By default texture coordinates are such that chroma is centered with
+ // any chroma subsampling. If a specific direction is given, make it
+ // so that the luma and chroma sample line up exactly.
+ // For 4:4:4, setting chroma location should have no effect at all.
+ // luma sample size (in chroma coord. space)
+ chroma.t[0] = ls_w < 1 ? ls_w * -cx / 2 : 0;
+ chroma.t[1] = ls_h < 1 ? ls_h * -cy / 2 : 0;
+ }
+
+ memset(img, 0, 4 * sizeof(img[0]));
+ for (int n = 0; n < p->plane_count; n++) {
+ struct texplane *t = &vimg->planes[n];
+
+ enum plane_type type = PLANE_NONE;
+ int padding = 0;
+ for (int i = 0; i < 4; i++) {
+ int c = p->ra_format.components[n][i];
+ enum plane_type ctype;
+ if (c == 0) {
+ ctype = PLANE_NONE;
+ } else if (c == 4) {
+ ctype = PLANE_ALPHA;
+ } else if (p->image_params.color.space == MP_CSP_RGB) {
+ ctype = PLANE_RGB;
+ } else if (p->image_params.color.space == MP_CSP_XYZ) {
+ ctype = PLANE_XYZ;
+ } else {
+ ctype = c == 1 ? PLANE_LUMA : PLANE_CHROMA;
+ }
+ type = merge_plane_types(type, ctype);
+ if (!c && padding == i)
+ padding = i + 1;
+ }
+
+ int msb_valid_bits =
+ p->ra_format.component_bits + MPMIN(p->ra_format.component_pad, 0);
+ int csp = type == PLANE_ALPHA ? MP_CSP_RGB : p->image_params.color.space;
+ float tex_mul =
+ 1.0 / mp_get_csp_mul(csp, msb_valid_bits, p->ra_format.component_bits);
+ if (p->ra_format.component_type == RA_CTYPE_FLOAT)
+ tex_mul = 1.0;
+
+ img[n] = (struct image){
+ .type = type,
+ .tex = t->tex,
+ .multiplier = tex_mul,
+ .w = t->w,
+ .h = t->h,
+ .padding = padding,
+ };
+
+ for (int i = 0; i < 4; i++)
+ img[n].components += !!p->ra_format.components[n][i];
+
+ get_transform(t->w, t->h, p->image_params.rotate, t->flipped,
+ &img[n].transform);
+ if (p->image_params.rotate % 180 == 90)
+ MPSWAP(int, img[n].w, img[n].h);
+
+ off[n] = identity_trans;
+
+ if (type == PLANE_CHROMA) {
+ struct gl_transform rot;
+ get_transform(0, 0, p->image_params.rotate, true, &rot);
+
+ struct gl_transform tr = chroma;
+ gl_transform_vec(rot, &tr.t[0], &tr.t[1]);
+
+ float dx = (chroma_upsize(w, p->ra_format.chroma_w) - w) * ls_w;
+ float dy = (chroma_upsize(h, p->ra_format.chroma_h) - h) * ls_h;
+
+ // Adjust the chroma offset if the real chroma size is fractional
+ // due to image sizes not being aligned to chroma subsampling.
+ struct gl_transform rot2;
+ get_transform(0, 0, p->image_params.rotate, t->flipped, &rot2);
+ if (rot2.m[0][0] < 0)
+ tr.t[0] += dx;
+ if (rot2.m[1][0] < 0)
+ tr.t[0] += dy;
+ if (rot2.m[0][1] < 0)
+ tr.t[1] += dx;
+ if (rot2.m[1][1] < 0)
+ tr.t[1] += dy;
+
+ off[n] = tr;
+ }
+ }
+}
+
+// Return the index of the given component (assuming all non-padding components
+// of all planes are concatenated into a linear list).
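+// e.g. for a fully planar format where plane n carries component n + 1,
+// find_comp(desc, 2) == 1.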
+static int find_comp(struct ra_imgfmt_desc *desc, int component)
+{
+ int cur = 0;
+ for (int n = 0; n < desc->num_planes; n++) {
+ for (int i = 0; i < 4; i++) {
+ if (desc->components[n][i]) {
+ if (desc->components[n][i] == component)
+ return cur;
+ cur++;
+ }
+ }
+ }
+ return -1;
+}
+
+static void init_video(struct gl_video *p)
+{
+ p->use_integer_conversion = false;
+
+ struct ra_hwdec *hwdec = ra_hwdec_get(&p->hwdec_ctx, p->image_params.imgfmt);
+ if (hwdec) {
+ if (hwdec->driver->overlay_frame) {
+ MP_WARN(p, "Using HW-overlay mode. No GL filtering is performed "
+ "on the video!\n");
+ p->hwdec_overlay = hwdec;
+ } else {
+ p->hwdec_mapper = ra_hwdec_mapper_create(hwdec, &p->image_params);
+ if (!p->hwdec_mapper)
+ MP_ERR(p, "Initializing texture for hardware decoding failed.\n");
+ }
+ if (p->hwdec_mapper)
+ p->image_params = p->hwdec_mapper->dst_params;
+ const char **exts = hwdec->glsl_extensions;
+ for (int n = 0; exts && exts[n]; n++)
+ gl_sc_enable_extension(p->sc, (char *)exts[n]);
+ p->hwdec_active = true;
+ }
+
+ p->ra_format = (struct ra_imgfmt_desc){0};
+ ra_get_imgfmt_desc(p->ra, p->image_params.imgfmt, &p->ra_format);
+
+ p->plane_count = p->ra_format.num_planes;
+
+ p->has_alpha = false;
+ p->is_gray = true;
+
+ for (int n = 0; n < p->ra_format.num_planes; n++) {
+ for (int i = 0; i < 4; i++) {
+ if (p->ra_format.components[n][i]) {
+ p->has_alpha |= p->ra_format.components[n][i] == 4;
+ p->is_gray &= p->ra_format.components[n][i] == 1 ||
+ p->ra_format.components[n][i] == 4;
+ }
+ }
+ }
+
+ for (int c = 0; c < 4; c++) {
+ int loc = find_comp(&p->ra_format, c + 1);
+ p->color_swizzle[c] = "rgba"[loc >= 0 && loc < 4 ? loc : 0];
+ }
+ p->color_swizzle[4] = '\0';
+
+ mp_image_params_guess_csp(&p->image_params);
+
+ av_lfg_init(&p->lfg, 1);
+
+ debug_check_gl(p, "before video texture creation");
+
+ if (!p->hwdec_active) {
+ struct video_image *vimg = &p->image;
+
+ struct mp_image layout = {0};
+ mp_image_set_params(&layout, &p->image_params);
+
+ for (int n = 0; n < p->plane_count; n++) {
+ struct texplane *plane = &vimg->planes[n];
+ const struct ra_format *format = p->ra_format.planes[n];
+
+ plane->w = mp_image_plane_w(&layout, n);
+ plane->h = mp_image_plane_h(&layout, n);
+
+ struct ra_tex_params params = {
+ .dimensions = 2,
+ .w = plane->w + p->opts.tex_pad_x,
+ .h = plane->h + p->opts.tex_pad_y,
+ .d = 1,
+ .format = format,
+ .render_src = true,
+ .src_linear = format->linear_filter,
+ .non_normalized = p->opts.use_rectangle,
+ .host_mutable = true,
+ };
+
+ MP_VERBOSE(p, "Texture for plane %d: %dx%d\n", n,
+ params.w, params.h);
+
+ plane->tex = ra_tex_create(p->ra, &params);
+ p->use_integer_conversion |= format->ctype == RA_CTYPE_UINT;
+ }
+ }
+
+ debug_check_gl(p, "after video texture creation");
+
+ // Format-dependent checks.
+ check_gl_features(p);
+
+ gl_video_setup_hooks(p);
+}
+
+static struct dr_buffer *gl_find_dr_buffer(struct gl_video *p, uint8_t *ptr)
+{
+ for (int i = 0; i < p->num_dr_buffers; i++) {
+ struct dr_buffer *buffer = &p->dr_buffers[i];
+ uint8_t *bufptr = buffer->buf->data;
+ size_t size = buffer->buf->params.size;
+ if (ptr >= bufptr && ptr < bufptr + size)
+ return buffer;
+ }
+
+ return NULL;
+}
+
+static void gc_pending_dr_fences(struct gl_video *p, bool force)
+{
+again:;
+ for (int n = 0; n < p->num_dr_buffers; n++) {
+ struct dr_buffer *buffer = &p->dr_buffers[n];
+ if (!buffer->mpi)
+ continue;
+
+ bool res = p->ra->fns->buf_poll(p->ra, buffer->buf);
+ if (res || force) {
+ // Unreferencing the image could cause gl_video_dr_free_buffer()
+ // to be called by the talloc destructor (if it was the last
+ // reference). This will implicitly invalidate the buffer pointer
+ // and change the p->dr_buffers array. To make it worse, it could
+ // free multiple dr_buffers due to weird theoretical corner cases.
+ // This is also why we use the goto to iterate again from the
+ // start, because everything gets fucked up. Hail satan!
+ struct mp_image *ref = buffer->mpi;
+ buffer->mpi = NULL;
+ talloc_free(ref);
+ goto again;
+ }
+ }
+}
+
+static void unref_current_image(struct gl_video *p)
+{
+ struct video_image *vimg = &p->image;
+
+ if (vimg->hwdec_mapped) {
+ assert(p->hwdec_active && p->hwdec_mapper);
+ ra_hwdec_mapper_unmap(p->hwdec_mapper);
+ memset(vimg->planes, 0, sizeof(vimg->planes));
+ vimg->hwdec_mapped = false;
+ }
+
+ vimg->id = 0;
+
+ mp_image_unrefp(&vimg->mpi);
+
+ // While we're at it, also garbage collect pending fences in here to
+ // get it out of the way.
+ gc_pending_dr_fences(p, false);
+}
+
+// If overlay mode is used, make sure to remove the overlay.
+// Be careful with this. Removing the overlay and adding another one will
+// lead to flickering artifacts.
+static void unmap_overlay(struct gl_video *p)
+{
+ if (p->hwdec_overlay)
+ p->hwdec_overlay->driver->overlay_frame(p->hwdec_overlay, NULL, NULL, NULL, true);
+}
+
+static void uninit_video(struct gl_video *p)
+{
+ uninit_rendering(p);
+
+ struct video_image *vimg = &p->image;
+
+ unmap_overlay(p);
+ unref_current_image(p);
+
+ for (int n = 0; n < p->plane_count; n++) {
+ struct texplane *plane = &vimg->planes[n];
+ ra_tex_free(p->ra, &plane->tex);
+ }
+ *vimg = (struct video_image){0};
+
+ // Invalidate image_params to ensure that gl_video_config() will call
+ // init_video() on uninitialized gl_video.
+ p->real_image_params = (struct mp_image_params){0};
+ p->image_params = p->real_image_params;
+ p->hwdec_active = false;
+ p->hwdec_overlay = NULL;
+ ra_hwdec_mapper_free(&p->hwdec_mapper);
+}
+
+static void pass_record(struct gl_video *p, struct mp_pass_perf perf)
+{
+ if (!p->pass || p->pass_idx == VO_PASS_PERF_MAX)
+ return;
+
+ struct pass_info *pass = &p->pass[p->pass_idx];
+ pass->perf = perf;
+
+ if (pass->desc.len == 0)
+ bstr_xappend(p, &pass->desc, bstr0("(unknown)"));
+
+ p->pass_idx++;
+}
+
+PRINTF_ATTRIBUTE(2, 3)
+static void pass_describe(struct gl_video *p, const char *textf, ...)
+{
+ if (!p->pass || p->pass_idx == VO_PASS_PERF_MAX)
+ return;
+
+ struct pass_info *pass = &p->pass[p->pass_idx];
+
+ if (pass->desc.len > 0)
+ bstr_xappend(p, &pass->desc, bstr0(" + "));
+
+ va_list ap;
+ va_start(ap, textf);
+ bstr_xappend_vasprintf(p, &pass->desc, textf, ap);
+ va_end(ap);
+}
+
+static void pass_info_reset(struct gl_video *p, bool is_redraw)
+{
+ p->pass = is_redraw ? p->pass_redraw : p->pass_fresh;
+ p->pass_idx = 0;
+
+ for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
+ p->pass[i].desc.len = 0;
+ p->pass[i].perf = (struct mp_pass_perf){0};
+ }
+}
+
+static void pass_report_performance(struct gl_video *p)
+{
+ if (!p->pass)
+ return;
+
+ for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
+ struct pass_info *pass = &p->pass[i];
+ if (pass->desc.len) {
+ MP_TRACE(p, "pass '%.*s': last %dus avg %dus peak %dus\n",
+ BSTR_P(pass->desc),
+ (int)pass->perf.last/1000,
+ (int)pass->perf.avg/1000,
+ (int)pass->perf.peak/1000);
+ }
+ }
+}
+
+static void pass_prepare_src_tex(struct gl_video *p)
+{
+ struct gl_shader_cache *sc = p->sc;
+
+ for (int n = 0; n < p->num_pass_imgs; n++) {
+ struct image *s = &p->pass_imgs[n];
+ if (!s->tex)
+ continue;
+
+ char *texture_name = mp_tprintf(32, "texture%d", n);
+ char *texture_size = mp_tprintf(32, "texture_size%d", n);
+ char *texture_rot = mp_tprintf(32, "texture_rot%d", n);
+ char *texture_off = mp_tprintf(32, "texture_off%d", n);
+ char *pixel_size = mp_tprintf(32, "pixel_size%d", n);
+
+ gl_sc_uniform_texture(sc, texture_name, s->tex);
+ float f[2] = {1, 1};
+ if (!s->tex->params.non_normalized) {
+ f[0] = s->tex->params.w;
+ f[1] = s->tex->params.h;
+ }
+ gl_sc_uniform_vec2(sc, texture_size, f);
+ gl_sc_uniform_mat2(sc, texture_rot, true, (float *)s->transform.m);
+ gl_sc_uniform_vec2(sc, texture_off, (float *)s->transform.t);
+ gl_sc_uniform_vec2(sc, pixel_size, (float[]){1.0f / f[0],
+ 1.0f / f[1]});
+ }
+}
+
+static void cleanup_binds(struct gl_video *p)
+{
+ p->num_pass_imgs = 0;
+}
+
+// Sets the appropriate compute shader metadata for an implicit compute pass
+// bw/bh: block size
+static void pass_is_compute(struct gl_video *p, int bw, int bh, bool flexible)
+{
+ if (p->pass_compute.active && flexible) {
+ // Avoid overwriting existing block sizes when using a flexible pass
+ bw = p->pass_compute.block_w;
+ bh = p->pass_compute.block_h;
+ }
+
+ p->pass_compute = (struct compute_info){
+ .active = true,
+ .block_w = bw,
+ .block_h = bh,
+ };
+}
+
+// w/h: the width/height of the compute shader's operating domain (e.g. the
+// target that needs to be written, or the source texture that needs to
+// be reduced)
+static void dispatch_compute(struct gl_video *p, int w, int h,
+ struct compute_info info)
+{
+ PRELUDE("layout (local_size_x = %d, local_size_y = %d) in;\n",
+ info.threads_w > 0 ? info.threads_w : info.block_w,
+ info.threads_h > 0 ? info.threads_h : info.block_h);
+
+ pass_prepare_src_tex(p);
+
+ // Since we don't actually have vertices, we pretend for convenience
+ // reasons that we do and calculate the right texture coordinates based on
+ // the output sample ID
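+ // outcoord(id) maps an invocation ID to the corresponding pixel center in
+ // normalized coordinates, e.g. id == (0, 0) becomes (0.5 / w, 0.5 / h).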
+ gl_sc_uniform_vec2(p->sc, "out_scale", (float[2]){ 1.0 / w, 1.0 / h });
+ PRELUDE("#define outcoord(id) (out_scale * (vec2(id) + vec2(0.5)))\n");
+
+ for (int n = 0; n < p->num_pass_imgs; n++) {
+ struct image *s = &p->pass_imgs[n];
+ if (!s->tex)
+ continue;
+
+ PRELUDE("#define texmap%d(id) (texture_rot%d * outcoord(id) + "
+ "pixel_size%d * texture_off%d)\n", n, n, n, n);
+ PRELUDE("#define texcoord%d texmap%d(gl_GlobalInvocationID)\n", n, n);
+ }
+
+ // always round up when dividing to make sure we don't leave off a part of
+ // the image
+ int num_x = info.block_w > 0 ? (w + info.block_w - 1) / info.block_w : 1,
+ num_y = info.block_h > 0 ? (h + info.block_h - 1) / info.block_h : 1;
+
+ if (!(p->ra->caps & RA_CAP_NUM_GROUPS))
+ PRELUDE("#define gl_NumWorkGroups uvec3(%d, %d, 1)\n", num_x, num_y);
+
+ pass_record(p, gl_sc_dispatch_compute(p->sc, num_x, num_y, 1));
+ cleanup_binds(p);
+}
+
+static struct mp_pass_perf render_pass_quad(struct gl_video *p,
+ struct ra_fbo fbo, bool discard,
+ const struct mp_rect *dst)
+{
+ // The first element is reserved for `vec2 position`
+ int num_vertex_attribs = 1 + p->num_pass_imgs;
+ size_t vertex_stride = num_vertex_attribs * sizeof(struct vertex_pt);
+
+ // Expand the VAO if necessary
+ while (p->vao_len < num_vertex_attribs) {
+ MP_TARRAY_APPEND(p, p->vao, p->vao_len, (struct ra_renderpass_input) {
+ .name = talloc_asprintf(p, "texcoord%d", p->vao_len - 1),
+ .type = RA_VARTYPE_FLOAT,
+ .dim_v = 2,
+ .dim_m = 1,
+ .offset = p->vao_len * sizeof(struct vertex_pt),
+ });
+ }
+
+ int num_vertices = 6; // quad as triangle list
+ int num_attribs_total = num_vertices * num_vertex_attribs;
+ MP_TARRAY_GROW(p, p->tmp_vertex, num_attribs_total);
+
+ struct gl_transform t;
+ gl_transform_ortho_fbo(&t, fbo);
+
+ float x[2] = {dst->x0, dst->x1};
+ float y[2] = {dst->y0, dst->y1};
+ gl_transform_vec(t, &x[0], &y[0]);
+ gl_transform_vec(t, &x[1], &y[1]);
+
+ for (int n = 0; n < 4; n++) {
+ struct vertex_pt *vs = &p->tmp_vertex[num_vertex_attribs * n];
+ // vec2 position in idx 0
+ vs[0].x = x[n / 2];
+ vs[0].y = y[n % 2];
+ for (int i = 0; i < p->num_pass_imgs; i++) {
+ struct image *s = &p->pass_imgs[i];
+ if (!s->tex)
+ continue;
+ struct gl_transform tr = s->transform;
+ float tx = (n / 2) * s->w;
+ float ty = (n % 2) * s->h;
+ gl_transform_vec(tr, &tx, &ty);
+ bool rect = s->tex->params.non_normalized;
+ // vec2 texcoordN in idx N+1
+ vs[i + 1].x = tx / (rect ? 1 : s->tex->params.w);
+ vs[i + 1].y = ty / (rect ? 1 : s->tex->params.h);
+ }
+ }
+
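+ // The quad is drawn as a triangle list: vertices 0..3 are the corners,
+ // and the memmoves below duplicate corners 2 and 1 into slots 4 and 5
+ // to form the second triangle.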
+ memmove(&p->tmp_vertex[num_vertex_attribs * 4],
+ &p->tmp_vertex[num_vertex_attribs * 2],
+ vertex_stride);
+
+ memmove(&p->tmp_vertex[num_vertex_attribs * 5],
+ &p->tmp_vertex[num_vertex_attribs * 1],
+ vertex_stride);
+
+ return gl_sc_dispatch_draw(p->sc, fbo.tex, discard, p->vao, num_vertex_attribs,
+ vertex_stride, p->tmp_vertex, num_vertices);
+}
+
+static void finish_pass_fbo(struct gl_video *p, struct ra_fbo fbo,
+ bool discard, const struct mp_rect *dst)
+{
+ pass_prepare_src_tex(p);
+ pass_record(p, render_pass_quad(p, fbo, discard, dst));
+ debug_check_gl(p, "after rendering");
+ cleanup_binds(p);
+}
+
+// dst_tex: this will be used for rendering; the texture is reallocated if
+// the required parameters have changed
+// w, h: required texture dimensions, which also define the target rectangle
+// used for rasterization
+static void finish_pass_tex(struct gl_video *p, struct ra_tex **dst_tex,
+ int w, int h)
+{
+ if (!ra_tex_resize(p->ra, p->log, dst_tex, w, h, p->fbo_format)) {
+ cleanup_binds(p);
+ gl_sc_reset(p->sc);
+ return;
+ }
+
+ // If RA_CAP_PARALLEL_COMPUTE is set, try to prefer compute shaders
+ // over fragment shaders wherever possible.
+ if (!p->pass_compute.active && (p->ra->caps & RA_CAP_PARALLEL_COMPUTE) &&
+ (*dst_tex)->params.storage_dst)
+ {
+ pass_is_compute(p, 16, 16, true);
+ }
+
+ if (p->pass_compute.active) {
+ gl_sc_uniform_image2D_wo(p->sc, "out_image", *dst_tex);
+ if (!p->pass_compute.directly_writes)
+ GLSL(imageStore(out_image, ivec2(gl_GlobalInvocationID), color);)
+
+ dispatch_compute(p, w, h, p->pass_compute);
+ p->pass_compute = (struct compute_info){0};
+
+ debug_check_gl(p, "after dispatching compute shader");
+ } else {
+ struct ra_fbo fbo = { .tex = *dst_tex, };
+ finish_pass_fbo(p, fbo, true, &(struct mp_rect){0, 0, w, h});
+ }
+}
+
+static const char *get_tex_swizzle(struct image *img)
+{
+ if (!img->tex)
+ return "rgba";
+ return img->tex->params.format->luminance_alpha ? "raaa" : "rgba";
+}
+
+// Copy a texture to the vec4 color, while increasing offset. Also applies
+// the texture multiplier to the sampled color
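+// e.g. copying a 2-component image at *offset == 1 with no padding emits
+// "color.gb = <mul> * vec4(texture(textureN, texcoordN)).rg;".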
+static void copy_image(struct gl_video *p, unsigned int *offset, struct image img)
+{
+ const unsigned int count = img.components;
+ char src[5] = {0};
+ char dst[5] = {0};
+
+ assert(*offset + count < sizeof(dst));
+ assert(img.padding + count < sizeof(src));
+
+ int id = pass_bind(p, img);
+
+ const char *tex_fmt = get_tex_swizzle(&img);
+ const char *dst_fmt = "rgba";
+ for (unsigned int i = 0; i < count; i++) {
+ src[i] = tex_fmt[img.padding + i];
+ dst[i] = dst_fmt[*offset + i];
+ }
+
+ if (img.tex && img.tex->params.format->ctype == RA_CTYPE_UINT) {
+ uint64_t tex_max = 1ull << p->ra_format.component_bits;
+ img.multiplier *= 1.0 / (tex_max - 1);
+ }
+
+ GLSLF("color.%s = %f * vec4(texture(texture%d, texcoord%d)).%s;\n",
+ dst, img.multiplier, id, id, src);
+
+ *offset += count;
+}
+
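+// Fill the color channels not covered by num_components with neutral values:
+// 0.0 for the color channels and 1.0 for alpha.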
+static void skip_unused(struct gl_video *p, int num_components)
+{
+ for (int i = num_components; i < 4; i++)
+ GLSLF("color.%c = %f;\n", "rgba"[i], i < 3 ? 0.0 : 1.0);
+}
+
+static void uninit_scaler(struct gl_video *p, struct scaler *scaler)
+{
+ ra_tex_free(p->ra, &scaler->sep_fbo);
+ ra_tex_free(p->ra, &scaler->lut);
+ scaler->kernel = NULL;
+ scaler->initialized = false;
+}
+
+static void hook_prelude(struct gl_video *p, const char *name, int id,
+ struct image img)
+{
+ GLSLHF("#define %s_raw texture%d\n", name, id);
+ GLSLHF("#define %s_pos texcoord%d\n", name, id);
+ GLSLHF("#define %s_size texture_size%d\n", name, id);
+ GLSLHF("#define %s_rot texture_rot%d\n", name, id);
+ GLSLHF("#define %s_off texture_off%d\n", name, id);
+ GLSLHF("#define %s_pt pixel_size%d\n", name, id);
+ GLSLHF("#define %s_map texmap%d\n", name, id);
+ GLSLHF("#define %s_mul %f\n", name, img.multiplier);
+
+ char crap[5] = "";
+ snprintf(crap, sizeof(crap), "%s", get_tex_swizzle(&img));
+
+ // Remove leading padding by rotating the swizzle mask.
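+ // e.g. with img.padding == 1, "rgba" becomes "gbar".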
+ int len = strlen(crap);
+ for (int n = 0; n < img.padding; n++) {
+ if (len) {
+ char f = crap[0];
+ memmove(crap, crap + 1, len - 1);
+ crap[len - 1] = f;
+ }
+ }
+
+ // Set up the sampling functions
+ GLSLHF("#define %s_tex(pos) (%s_mul * vec4(texture(%s_raw, pos)).%s)\n",
+ name, name, name, crap);
+
+ if (p->ra->caps & RA_CAP_GATHER) {
+ GLSLHF("#define %s_gather(pos, c) (%s_mul * vec4("
+ "textureGather(%s_raw, pos, c)))\n", name, name, name);
+ }
+
+ // Since the extra matrix multiplication impacts performance,
+ // skip it unless the texture was actually rotated
+ if (gl_transform_eq(img.transform, identity_trans)) {
+ GLSLHF("#define %s_texOff(off) %s_tex(%s_pos + %s_pt * vec2(off))\n",
+ name, name, name, name);
+ } else {
+ GLSLHF("#define %s_texOff(off) "
+ "%s_tex(%s_pos + %s_rot * vec2(off)/%s_size)\n",
+ name, name, name, name, name);
+ }
+}
+
+static bool saved_img_find(struct gl_video *p, const char *name,
+ struct image *out)
+{
+ if (!name || !out)
+ return false;
+
+ for (int i = 0; i < p->num_saved_imgs; i++) {
+ if (strcmp(p->saved_imgs[i].name, name) == 0) {
+ *out = p->saved_imgs[i].img;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void saved_img_store(struct gl_video *p, const char *name,
+ struct image img)
+{
+ assert(name);
+
+ for (int i = 0; i < p->num_saved_imgs; i++) {
+ if (strcmp(p->saved_imgs[i].name, name) == 0) {
+ p->saved_imgs[i].img = img;
+ return;
+ }
+ }
+
+ MP_TARRAY_APPEND(p, p->saved_imgs, p->num_saved_imgs, (struct saved_img) {
+ .name = name,
+ .img = img
+ });
+}
+
+static bool pass_hook_setup_binds(struct gl_video *p, const char *name,
+ struct image img, struct tex_hook *hook)
+{
+ for (int t = 0; t < SHADER_MAX_BINDS; t++) {
+ char *bind_name = (char *)hook->bind_tex[t];
+
+ if (!bind_name)
+ continue;
+
+ // This is a special name that means "currently hooked texture"
+ if (strcmp(bind_name, "HOOKED") == 0) {
+ int id = pass_bind(p, img);
+ hook_prelude(p, "HOOKED", id, img);
+ hook_prelude(p, name, id, img);
+ continue;
+ }
+
+ // BIND can also be used to load user-defined textures, in which
+ // case we will directly load them as a uniform instead of
+ // generating the hook_prelude boilerplate
+ for (int u = 0; u < p->num_user_textures; u++) {
+ struct gl_user_shader_tex *utex = &p->user_textures[u];
+ if (bstr_equals0(utex->name, bind_name)) {
+ gl_sc_uniform_texture(p->sc, bind_name, utex->tex);
+ goto next_bind;
+ }
+ }
+
+ struct image bind_img;
+ if (!saved_img_find(p, bind_name, &bind_img)) {
+ // Clean up texture bindings and move on to the next hook
+ MP_TRACE(p, "Skipping hook on %s due to no texture named %s.\n",
+ name, bind_name);
+ p->num_pass_imgs -= t;
+ return false;
+ }
+
+ hook_prelude(p, bind_name, pass_bind(p, bind_img), bind_img);
+
+next_bind: ;
+ }
+
+ return true;
+}
+
+static struct ra_tex **next_hook_tex(struct gl_video *p)
+{
+ if (p->idx_hook_textures == p->num_hook_textures)
+ MP_TARRAY_APPEND(p, p->hook_textures, p->num_hook_textures, NULL);
+
+ return &p->hook_textures[p->idx_hook_textures++];
+}
+
+// Process hooks for a plane, saving the result and returning a new image
+// If 'trans' is NULL, the shader is forbidden from transforming img
+static struct image pass_hook(struct gl_video *p, const char *name,
+ struct image img, struct gl_transform *trans)
+{
+ if (!name)
+ return img;
+
+ saved_img_store(p, name, img);
+
+ MP_TRACE(p, "Running hooks for %s\n", name);
+ for (int i = 0; i < p->num_tex_hooks; i++) {
+ struct tex_hook *hook = &p->tex_hooks[i];
+
+ // Figure out if this pass hooks this texture
+ for (int h = 0; h < SHADER_MAX_HOOKS; h++) {
+ if (hook->hook_tex[h] && strcmp(hook->hook_tex[h], name) == 0)
+ goto found;
+ }
+
+ continue;
+
+found:
+ // Check the hook's condition
+ if (hook->cond && !hook->cond(p, img, hook->priv)) {
+ MP_TRACE(p, "Skipping hook on %s due to condition.\n", name);
+ continue;
+ }
+
+ const char *store_name = hook->save_tex ? hook->save_tex : name;
+ bool is_overwrite = strcmp(store_name, name) == 0;
+
+ // If the user shader is set to align HOOKED with the reference and fix
+ // its offset, it requires HOOKED to be resizable and overwritten.
+ if (is_overwrite && hook->align_offset) {
+ if (!trans) {
+ MP_ERR(p, "Hook tried to align unresizable texture %s!\n",
+ name);
+ return img;
+ }
+
+ struct gl_transform align_off = identity_trans;
+ align_off.t[0] = trans->t[0];
+ align_off.t[1] = trans->t[1];
+
+ gl_transform_trans(align_off, &img.transform);
+ }
+
+ if (!pass_hook_setup_binds(p, name, img, hook))
+ continue;
+
+ // Run the actual hook. This generates a series of GLSL shader
+ // instructions sufficient for drawing the hook's output
+ struct gl_transform hook_off = identity_trans;
+ hook->hook(p, img, &hook_off, hook->priv);
+
+ int comps = hook->components ? hook->components : img.components;
+ skip_unused(p, comps);
+
+ // Compute the updated FBO dimensions and store the result
+ struct mp_rect_f sz = {0, 0, img.w, img.h};
+ gl_transform_rect(hook_off, &sz);
+ int w = lroundf(fabs(sz.x1 - sz.x0));
+ int h = lroundf(fabs(sz.y1 - sz.y0));
+
+ struct ra_tex **tex = next_hook_tex(p);
+ finish_pass_tex(p, tex, w, h);
+ struct image saved_img = image_wrap(*tex, img.type, comps);
+
+ // If the texture we're saving overwrites the "current" texture, also
+ // update the tex parameter so that the future loop cycles will use the
+ // updated values, and export the offset
+ if (is_overwrite) {
+ if (!trans && !gl_transform_eq(hook_off, identity_trans)) {
+ MP_ERR(p, "Hook tried changing size of unscalable texture %s!\n",
+ name);
+ return img;
+ }
+
+ img = saved_img;
+ if (trans) {
+ gl_transform_trans(hook_off, trans);
+
+ // If the user shader is set to align HOOKED, the offset it produces
+ // is dynamic (though the resizing factor is static).
+ // Align it with the reference manually so the offset stays fixed.
+ if (hook->align_offset) {
+ trans->t[0] = 0.0;
+ trans->t[1] = 0.0;
+ }
+ }
+ }
+
+ saved_img_store(p, store_name, saved_img);
+ }
+
+ return img;
+}
+
+// This can be used at any time in the middle of rendering to specify an
+// optional hook point, which if triggered will render out to a new FBO and
+// load the result back into vec4 color. Offsets applied by the hooks are
+// accumulated in tex_trans, and the FBO is dimensioned according
+// to p->texture_w/h
+static void pass_opt_hook_point(struct gl_video *p, const char *name,
+ struct gl_transform *tex_trans)
+{
+ if (!name)
+ return;
+
+ for (int i = 0; i < p->num_tex_hooks; i++) {
+ struct tex_hook *hook = &p->tex_hooks[i];
+
+ for (int h = 0; h < SHADER_MAX_HOOKS; h++) {
+ if (hook->hook_tex[h] && strcmp(hook->hook_tex[h], name) == 0)
+ goto found;
+ }
+
+ for (int b = 0; b < SHADER_MAX_BINDS; b++) {
+ if (hook->bind_tex[b] && strcmp(hook->bind_tex[b], name) == 0)
+ goto found;
+ }
+ }
+
+ // Nothing uses this texture, don't bother storing it
+ return;
+
+found: ;
+ struct ra_tex **tex = next_hook_tex(p);
+ finish_pass_tex(p, tex, p->texture_w, p->texture_h);
+ struct image img = image_wrap(*tex, PLANE_RGB, p->components);
+ img = pass_hook(p, name, img, tex_trans);
+ copy_image(p, &(int){0}, img);
+ p->texture_w = img.w;
+ p->texture_h = img.h;
+ p->components = img.components;
+ pass_describe(p, "(remainder pass)");
+}
+
+static void load_shader(struct gl_video *p, struct bstr body)
+{
+ gl_sc_hadd_bstr(p->sc, body);
+ gl_sc_uniform_dynamic(p->sc);
+ gl_sc_uniform_f(p->sc, "random", (double)av_lfg_get(&p->lfg) / UINT32_MAX);
+ gl_sc_uniform_dynamic(p->sc);
+ gl_sc_uniform_i(p->sc, "frame", p->frames_uploaded);
+ gl_sc_uniform_vec2(p->sc, "input_size",
+ (float[]){(p->src_rect.x1 - p->src_rect.x0) *
+ p->texture_offset.m[0][0],
+ (p->src_rect.y1 - p->src_rect.y0) *
+ p->texture_offset.m[1][1]});
+ gl_sc_uniform_vec2(p->sc, "target_size",
+ (float[]){p->dst_rect.x1 - p->dst_rect.x0,
+ p->dst_rect.y1 - p->dst_rect.y0});
+ gl_sc_uniform_vec2(p->sc, "tex_offset",
+ (float[]){p->src_rect.x0 * p->texture_offset.m[0][0] +
+ p->texture_offset.t[0],
+ p->src_rect.y0 * p->texture_offset.m[1][1] +
+ p->texture_offset.t[1]});
+}
+
+// Semantic equality
+static bool double_seq(double a, double b)
+{
+ return (isnan(a) && isnan(b)) || a == b;
+}
+
+static bool scaler_fun_eq(struct scaler_fun a, struct scaler_fun b)
+{
+ if ((a.name && !b.name) || (b.name && !a.name))
+ return false;
+
+ return ((!a.name && !b.name) || strcmp(a.name, b.name) == 0) &&
+ double_seq(a.params[0], b.params[0]) &&
+ double_seq(a.params[1], b.params[1]) &&
+ a.blur == b.blur &&
+ a.taper == b.taper;
+}
+
+static bool scaler_conf_eq(struct scaler_config a, struct scaler_config b)
+{
+ // Note: antiring isn't compared because it doesn't affect LUT
+ // generation
+ return scaler_fun_eq(a.kernel, b.kernel) &&
+ scaler_fun_eq(a.window, b.window) &&
+ a.radius == b.radius &&
+ a.clamp == b.clamp;
+}
+
+static void reinit_scaler(struct gl_video *p, struct scaler *scaler,
+ const struct scaler_config *conf,
+ double scale_factor,
+ int sizes[])
+{
+ assert(conf);
+ if (scaler_conf_eq(scaler->conf, *conf) &&
+ scaler->scale_factor == scale_factor &&
+ scaler->initialized)
+ return;
+
+ uninit_scaler(p, scaler);
+
+ if (scaler->index == SCALER_DSCALE && (!conf->kernel.name ||
+ !conf->kernel.name[0]))
+ {
+ conf = &p->opts.scaler[SCALER_SCALE];
+ }
+
+ if (scaler->index == SCALER_CSCALE && (!conf->kernel.name ||
+ !conf->kernel.name[0]))
+ {
+ conf = &p->opts.scaler[SCALER_SCALE];
+ }
+
+ struct filter_kernel bare_window;
+ const struct filter_kernel *t_kernel = mp_find_filter_kernel(conf->kernel.name);
+ const struct filter_window *t_window = mp_find_filter_window(conf->window.name);
+ bool is_tscale = scaler->index == SCALER_TSCALE;
+ if (!t_kernel) {
+ const struct filter_window *window = mp_find_filter_window(conf->kernel.name);
+ if (window) {
+ bare_window = (struct filter_kernel) { .f = *window };
+ t_kernel = &bare_window;
+ }
+ }
+
+ scaler->conf = *conf;
+ scaler->conf.kernel.name = (char *)handle_scaler_opt(conf->kernel.name, is_tscale);
+ scaler->conf.window.name = t_window ? (char *)t_window->name : NULL;
+ scaler->scale_factor = scale_factor;
+ scaler->insufficient = false;
+ scaler->initialized = true;
+ if (!t_kernel)
+ return;
+
+ scaler->kernel_storage = *t_kernel;
+ scaler->kernel = &scaler->kernel_storage;
+
+ if (!t_window) {
+ // fall back to the scaler's default window if available
+ t_window = mp_find_filter_window(t_kernel->window);
+ }
+ if (t_window)
+ scaler->kernel->w = *t_window;
+
+ for (int n = 0; n < 2; n++) {
+ if (!isnan(conf->kernel.params[n]))
+ scaler->kernel->f.params[n] = conf->kernel.params[n];
+ if (!isnan(conf->window.params[n]))
+ scaler->kernel->w.params[n] = conf->window.params[n];
+ }
+
+ if (conf->kernel.blur > 0.0)
+ scaler->kernel->f.blur = conf->kernel.blur;
+ if (conf->window.blur > 0.0)
+ scaler->kernel->w.blur = conf->window.blur;
+
+ if (conf->kernel.taper > 0.0)
+ scaler->kernel->f.taper = conf->kernel.taper;
+ if (conf->window.taper > 0.0)
+ scaler->kernel->w.taper = conf->window.taper;
+
+ if (scaler->kernel->f.resizable && conf->radius > 0.0)
+ scaler->kernel->f.radius = conf->radius;
+
+ scaler->kernel->clamp = conf->clamp;
+ scaler->insufficient = !mp_init_filter(scaler->kernel, sizes, scale_factor);
+
+ int size = scaler->kernel->size;
+ int num_components = size > 2 ? 4 : size;
+ const struct ra_format *fmt = ra_find_float16_format(p->ra, num_components);
+ assert(fmt);
+
+ int width = (size + num_components - 1) / num_components; // round up
+ int stride = width * num_components;
+ assert(size <= stride);
+
+ static const int lut_size = 256;
+ float *weights = talloc_array(NULL, float, lut_size * stride);
+ mp_compute_lut(scaler->kernel, lut_size, stride, weights);
+
+ bool use_1d = scaler->kernel->polar && (p->ra->caps & RA_CAP_TEX_1D);
+
+ struct ra_tex_params lut_params = {
+ .dimensions = use_1d ? 1 : 2,
+ .w = use_1d ? lut_size : width,
+ .h = use_1d ? 1 : lut_size,
+ .d = 1,
+ .format = fmt,
+ .render_src = true,
+ .src_linear = true,
+ .initial_data = weights,
+ };
+ scaler->lut = ra_tex_create(p->ra, &lut_params);
+
+ talloc_free(weights);
+
+ debug_check_gl(p, "after initializing scaler");
+}
+
+// Special helper for sampling from two separated stages
+static void pass_sample_separated(struct gl_video *p, struct image src,
+ struct scaler *scaler, int w, int h)
+{
+ // Separate the transformation into x and y components, per pass
+ struct gl_transform t_x = {
+ .m = {{src.transform.m[0][0], 0.0}, {src.transform.m[1][0], 1.0}},
+ .t = {src.transform.t[0], 0.0},
+ };
+ struct gl_transform t_y = {
+ .m = {{1.0, src.transform.m[0][1]}, {0.0, src.transform.m[1][1]}},
+ .t = {0.0, src.transform.t[1]},
+ };
+
+ // First pass (scale only in the y dir)
+ src.transform = t_y;
+ sampler_prelude(p->sc, pass_bind(p, src));
+ GLSLF("// first pass\n");
+ pass_sample_separated_gen(p->sc, scaler, 0, 1);
+ GLSLF("color *= %f;\n", src.multiplier);
+ finish_pass_tex(p, &scaler->sep_fbo, src.w, h);
+
+ // Second pass (scale only in the x dir)
+ src = image_wrap(scaler->sep_fbo, src.type, src.components);
+ src.transform = t_x;
+ pass_describe(p, "%s second pass", scaler->conf.kernel.name);
+ sampler_prelude(p->sc, pass_bind(p, src));
+ pass_sample_separated_gen(p->sc, scaler, 1, 0);
+}
+
+// Picks either the compute shader version or the regular sampler version
+// depending on hardware support
+static void pass_dispatch_sample_polar(struct gl_video *p, struct scaler *scaler,
+ struct image img, int w, int h)
+{
+ uint64_t reqs = RA_CAP_COMPUTE;
+ if ((p->ra->caps & reqs) != reqs)
+ goto fallback;
+
+ int bound = ceil(scaler->kernel->radius_cutoff);
+ int offset = bound - 1; // padding top/left
+ int padding = offset + bound; // total padding
+
+ float ratiox = (float)w / img.w,
+ ratioy = (float)h / img.h;
+
+ // For performance we want to load at least as many pixels
+ // horizontally as there are threads in a warp (32 for nvidia), as
+ // well as enough to take advantage of shmem parallelism
+ const int warp_size = 32, threads = 256;
+ int bw = warp_size;
+ int bh = threads / bw;
+
+ // We need to sample everything from base_min to base_max, so make sure
+ // we have enough room in shmem
+ int iw = (int)ceil(bw / ratiox) + padding + 1,
+ ih = (int)ceil(bh / ratioy) + padding + 1;
+
+ int shmem_req = iw * ih * img.components * sizeof(float);
+ if (shmem_req > p->ra->max_shmem)
+ goto fallback;
+
+ pass_is_compute(p, bw, bh, false);
+ pass_compute_polar(p->sc, scaler, img.components, bw, bh, iw, ih);
+ return;
+
+fallback:
+ // Fall back to regular polar shader when compute shaders are unsupported
+ // or the kernel is too big for shmem
+ pass_sample_polar(p->sc, scaler, img.components,
+ p->ra->caps & RA_CAP_GATHER);
+}
+
+// Sample from image, with the src rectangle given by it.
+// The dst rectangle is implicit by what the caller will do next, but w and h
+// must still be what is going to be used (to dimension FBOs correctly).
+// This will write the scaled contents to the vec4 "color".
+// The scaler unit is initialized by this function; in order to avoid cache
+// thrashing, the scaler unit should usually use the same parameters.
+static void pass_sample(struct gl_video *p, struct image img,
+ struct scaler *scaler, const struct scaler_config *conf,
+ double scale_factor, int w, int h)
+{
+ reinit_scaler(p, scaler, conf, scale_factor, filter_sizes);
+
+ // Describe scaler
+ const char *scaler_opt[] = {
+ [SCALER_SCALE] = "scale",
+ [SCALER_DSCALE] = "dscale",
+ [SCALER_CSCALE] = "cscale",
+ [SCALER_TSCALE] = "tscale",
+ };
+
+ pass_describe(p, "%s=%s (%s)", scaler_opt[scaler->index],
+ scaler->conf.kernel.name, plane_names[img.type]);
+
+ bool is_separated = scaler->kernel && !scaler->kernel->polar;
+
+ // Set up the transformation+prelude and bind the texture, for everything
+ // other than separated scaling (which does this in the subfunction)
+ if (!is_separated)
+ sampler_prelude(p->sc, pass_bind(p, img));
+
+ // Dispatch the scaler. They're all wildly different.
+ const char *name = scaler->conf.kernel.name;
+ if (strcmp(name, "bilinear") == 0) {
+ GLSL(color = texture(tex, pos);)
+ } else if (strcmp(name, "bicubic_fast") == 0) {
+ pass_sample_bicubic_fast(p->sc);
+ } else if (strcmp(name, "oversample") == 0) {
+ pass_sample_oversample(p->sc, scaler, w, h);
+ } else if (scaler->kernel && scaler->kernel->polar) {
+ pass_dispatch_sample_polar(p, scaler, img, w, h);
+ } else if (scaler->kernel) {
+ pass_sample_separated(p, img, scaler, w, h);
+ } else {
+ MP_ASSERT_UNREACHABLE(); // should never happen
+ }
+
+ // Apply any required multipliers. Separated scaling already does this in
+ // its first stage
+ if (!is_separated)
+ GLSLF("color *= %f;\n", img.multiplier);
+
+ // Micro-optimization: Avoid scaling unneeded channels
+ skip_unused(p, img.components);
+}
+
+// Returns true if two images are semantically equivalent (same metadata)
+static bool image_equiv(struct image a, struct image b)
+{
+ return a.type == b.type &&
+ a.components == b.components &&
+ a.multiplier == b.multiplier &&
+ a.tex->params.format == b.tex->params.format &&
+ a.tex->params.w == b.tex->params.w &&
+ a.tex->params.h == b.tex->params.h &&
+ a.w == b.w &&
+ a.h == b.h &&
+ gl_transform_eq(a.transform, b.transform);
+}
+
+static void deband_hook(struct gl_video *p, struct image img,
+ struct gl_transform *trans, void *priv)
+{
+ pass_describe(p, "debanding (%s)", plane_names[img.type]);
+ pass_sample_deband(p->sc, p->opts.deband_opts, &p->lfg,
+ p->image_params.color.gamma);
+}
+
+static void unsharp_hook(struct gl_video *p, struct image img,
+ struct gl_transform *trans, void *priv)
+{
+ pass_describe(p, "unsharp masking");
+ pass_sample_unsharp(p->sc, p->opts.unsharp);
+}
+
+struct szexp_ctx {
+ struct gl_video *p;
+ struct image img;
+};
+
+static bool szexp_lookup(void *priv, struct bstr var, float size[2])
+{
+ struct szexp_ctx *ctx = priv;
+ struct gl_video *p = ctx->p;
+
+ if (bstr_equals0(var, "NATIVE_CROPPED")) {
+ size[0] = (p->src_rect.x1 - p->src_rect.x0) * p->texture_offset.m[0][0];
+ size[1] = (p->src_rect.y1 - p->src_rect.y0) * p->texture_offset.m[1][1];
+ return true;
+ }
+
+ // The size of OUTPUT is already determined at this point; exposing it
+ // can be useful for user shaders that want to skip passes.
+ if (bstr_equals0(var, "OUTPUT")) {
+ size[0] = p->dst_rect.x1 - p->dst_rect.x0;
+ size[1] = p->dst_rect.y1 - p->dst_rect.y0;
+ return true;
+ }
+
+ // HOOKED is a special case
+ if (bstr_equals0(var, "HOOKED")) {
+ size[0] = ctx->img.w;
+ size[1] = ctx->img.h;
+ return true;
+ }
+
+ for (int o = 0; o < p->num_saved_imgs; o++) {
+ if (bstr_equals0(var, p->saved_imgs[o].name)) {
+ size[0] = p->saved_imgs[o].img.w;
+ size[1] = p->saved_imgs[o].img.h;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool user_hook_cond(struct gl_video *p, struct image img, void *priv)
+{
+ struct gl_user_shader_hook *shader = priv;
+ assert(shader);
+
+ float res = false;
+ struct szexp_ctx ctx = {p, img};
+ eval_szexpr(p->log, &ctx, szexp_lookup, shader->cond, &res);
+ return res;
+}
+
+static void user_hook(struct gl_video *p, struct image img,
+ struct gl_transform *trans, void *priv)
+{
+ struct gl_user_shader_hook *shader = priv;
+ assert(shader);
+ load_shader(p, shader->pass_body);
+
+ pass_describe(p, "user shader: %.*s (%s)", BSTR_P(shader->pass_desc),
+ plane_names[img.type]);
+
+ if (shader->compute.active) {
+ p->pass_compute = shader->compute;
+ GLSLF("hook();\n");
+ } else {
+ GLSLF("color = hook();\n");
+ }
+
+ // Make sure we at least create a legal FBO on failure, since it's better
+ // to do this and display an error message than just crash OpenGL
+ float w = 1.0, h = 1.0;
+
+ eval_szexpr(p->log, &(struct szexp_ctx){p, img}, szexp_lookup, shader->width, &w);
+ eval_szexpr(p->log, &(struct szexp_ctx){p, img}, szexp_lookup, shader->height, &h);
+
+ *trans = (struct gl_transform){{{w / img.w, 0}, {0, h / img.h}}};
+ gl_transform_trans(shader->offset, trans);
+}
+
+static bool add_user_hook(void *priv, struct gl_user_shader_hook hook)
+{
+ struct gl_video *p = priv;
+ struct gl_user_shader_hook *copy = talloc_ptrtype(p, copy);
+ *copy = hook;
+
+ struct tex_hook texhook = {
+ .save_tex = bstrdup0(copy, hook.save_tex),
+ .components = hook.components,
+ .align_offset = hook.align_offset,
+ .hook = user_hook,
+ .cond = user_hook_cond,
+ .priv = copy,
+ };
+
+ for (int h = 0; h < SHADER_MAX_HOOKS; h++)
+ texhook.hook_tex[h] = bstrdup0(copy, hook.hook_tex[h]);
+ for (int h = 0; h < SHADER_MAX_BINDS; h++)
+ texhook.bind_tex[h] = bstrdup0(copy, hook.bind_tex[h]);
+
+ MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, texhook);
+ return true;
+}
+
+static bool add_user_tex(void *priv, struct gl_user_shader_tex tex)
+{
+ struct gl_video *p = priv;
+
+ tex.tex = ra_tex_create(p->ra, &tex.params);
+ TA_FREEP(&tex.params.initial_data);
+
+ if (!tex.tex)
+ return false;
+
+ MP_TARRAY_APPEND(p, p->user_textures, p->num_user_textures, tex);
+ return true;
+}
+
+static void load_user_shaders(struct gl_video *p, char **shaders)
+{
+ if (!shaders)
+ return;
+
+ for (int n = 0; shaders[n] != NULL; n++) {
+ struct bstr file = load_cached_file(p, shaders[n]);
+ parse_user_shader(p->log, p->ra, file, p, add_user_hook, add_user_tex);
+ }
+}
+
+static void gl_video_setup_hooks(struct gl_video *p)
+{
+ gl_video_reset_hooks(p);
+
+ if (p->opts.deband) {
+ MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, (struct tex_hook) {
+ .hook_tex = {"LUMA", "CHROMA", "RGB", "XYZ"},
+ .bind_tex = {"HOOKED"},
+ .hook = deband_hook,
+ });
+ }
+
+ if (p->opts.unsharp != 0.0) {
+ MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, (struct tex_hook) {
+ .hook_tex = {"MAIN"},
+ .bind_tex = {"HOOKED"},
+ .hook = unsharp_hook,
+ });
+ }
+
+ load_user_shaders(p, p->opts.user_shaders);
+}
+
+// sample from video textures, set "color" variable to yuv value
+static void pass_read_video(struct gl_video *p)
+{
+ struct image img[4];
+ struct gl_transform offsets[4];
+ pass_get_images(p, &p->image, img, offsets);
+
+ // To keep the code as simple as possible, we currently run all shader
+ // stages even if they would be unnecessary (e.g. no hooks for a texture).
+ // In the future, deferred image should optimize this away.
+
+ // Merge semantically identical textures. This loop is done from back
+ // to front so that merged textures end up in the right order while
+ // simultaneously allowing us to skip unnecessary merges
+ for (int n = 3; n >= 0; n--) {
+ if (img[n].type == PLANE_NONE)
+ continue;
+
+ int first = n;
+ int num = 0;
+
+ for (int i = 0; i < n; i++) {
+ if (image_equiv(img[n], img[i]) &&
+ gl_transform_eq(offsets[n], offsets[i]))
+ {
+ GLSLF("// merging plane %d ...\n", i);
+ copy_image(p, &num, img[i]);
+ first = MPMIN(first, i);
+ img[i] = (struct image){0};
+ }
+ }
+
+ if (num > 0) {
+ GLSLF("// merging plane %d ... into %d\n", n, first);
+ copy_image(p, &num, img[n]);
+ pass_describe(p, "merging planes");
+ finish_pass_tex(p, &p->merge_tex[n], img[n].w, img[n].h);
+ img[first] = image_wrap(p->merge_tex[n], img[n].type, num);
+ img[n] = (struct image){0};
+ }
+ }
+
+ // If any textures are still in integer format by this point, we need
+ // to introduce an explicit conversion pass to avoid breaking hooks/scaling
+ for (int n = 0; n < 4; n++) {
+ if (img[n].tex && img[n].tex->params.format->ctype == RA_CTYPE_UINT) {
+ GLSLF("// use_integer fix for plane %d\n", n);
+ copy_image(p, &(int){0}, img[n]);
+ pass_describe(p, "use_integer fix");
+ finish_pass_tex(p, &p->integer_tex[n], img[n].w, img[n].h);
+ img[n] = image_wrap(p->integer_tex[n], img[n].type,
+ img[n].components);
+ }
+ }
+
+ // The basic idea is we assume the rgb/luma texture is the "reference" and
+ // scale everything else to match, after all planes are finalized.
+ // We find the reference texture first, in order to maintain texture offset
+ // between hooks on different types of planes.
+ int reference_tex_num = 0;
+ for (int n = 0; n < 4; n++) {
+ switch (img[n].type) {
+ case PLANE_RGB:
+ case PLANE_XYZ:
+ case PLANE_LUMA: break;
+ default: continue;
+ }
+
+ reference_tex_num = n;
+ break;
+ }
+
+ // Dispatch the hooks for all of these textures, saving and perhaps
+ // modifying them in the process
+ for (int n = 0; n < 4; n++) {
+ const char *name;
+ switch (img[n].type) {
+ case PLANE_RGB: name = "RGB"; break;
+ case PLANE_LUMA: name = "LUMA"; break;
+ case PLANE_CHROMA: name = "CHROMA"; break;
+ case PLANE_ALPHA: name = "ALPHA"; break;
+ case PLANE_XYZ: name = "XYZ"; break;
+ default: continue;
+ }
+
+ img[n] = pass_hook(p, name, img[n], &offsets[n]);
+
+ if (reference_tex_num == n) {
+ // The reference texture is finalized now.
+ p->texture_w = img[n].w;
+ p->texture_h = img[n].h;
+ p->texture_offset = offsets[n];
+ }
+ }
+
+ // At this point all planes are finalized but they may not be at the
+ // required size yet. Furthermore, they may have texture offsets that
+ // require realignment.
+
+ // Compute the reference rect
+ struct mp_rect_f src = {0.0, 0.0, p->image_params.w, p->image_params.h};
+ struct mp_rect_f ref = src;
+ gl_transform_rect(p->texture_offset, &ref);
+
+ // Explicitly scale all of the textures that don't match
+ for (int n = 0; n < 4; n++) {
+ if (img[n].type == PLANE_NONE)
+ continue;
+
+ // If the planes are aligned identically, we will end up with the
+ // exact same source rectangle.
+ struct mp_rect_f rect = src;
+ gl_transform_rect(offsets[n], &rect);
+ if (mp_rect_f_seq(ref, rect))
+ continue;
+
+ // If the rectangles differ, then our planes have a different
+ // alignment and/or size. First of all, we have to compute the
+ // corrections required to meet the target rectangle
+ struct gl_transform fix = {
+ .m = {{(ref.x1 - ref.x0) / (rect.x1 - rect.x0), 0.0},
+ {0.0, (ref.y1 - ref.y0) / (rect.y1 - rect.y0)}},
+ .t = {ref.x0, ref.y0},
+ };
+
+ // Since the scale in texture space is different from the scale in
+ // absolute terms, we have to scale the coefficients down to be
+ // relative to the texture's physical dimensions and local offset
+ struct gl_transform scale = {
+ .m = {{(float)img[n].w / p->texture_w, 0.0},
+ {0.0, (float)img[n].h / p->texture_h}},
+ .t = {-rect.x0, -rect.y0},
+ };
+ if (p->image_params.rotate % 180 == 90)
+ MPSWAP(double, scale.m[0][0], scale.m[1][1]);
+
+ gl_transform_trans(scale, &fix);
+
+ // Since the texture transform maps texture coordinates into texture
+ // space, rather than the other way around, we actually have to apply
+ // the *inverse* of this transform. Fortunately, calculating the inverse
+ // is relatively easy here.
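+ // For a diagonal affine map x' = m*x + t, the inverse is x = (1/m)*x' - t/m,
+ // which is what the next four lines compute per axis.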
+ fix.m[0][0] = 1.0 / fix.m[0][0];
+ fix.m[1][1] = 1.0 / fix.m[1][1];
+ fix.t[0] = fix.m[0][0] * -fix.t[0];
+ fix.t[1] = fix.m[1][1] * -fix.t[1];
+ gl_transform_trans(fix, &img[n].transform);
+
+ int scaler_id = -1;
+ const char *name = NULL;
+ switch (img[n].type) {
+ case PLANE_RGB:
+ case PLANE_LUMA:
+ case PLANE_XYZ:
+ scaler_id = SCALER_SCALE;
+ // these aren't worth hooking, fringe hypothetical cases only
+ break;
+ case PLANE_CHROMA:
+ scaler_id = SCALER_CSCALE;
+ name = "CHROMA_SCALED";
+ break;
+ case PLANE_ALPHA:
+ // alpha always uses bilinear
+ name = "ALPHA_SCALED";
+ }
+
+ if (scaler_id < 0)
+ continue;
+
+ const struct scaler_config *conf = &p->opts.scaler[scaler_id];
+
+ if (scaler_id == SCALER_CSCALE && (!conf->kernel.name ||
+ !conf->kernel.name[0]))
+ {
+ conf = &p->opts.scaler[SCALER_SCALE];
+ }
+
+ struct scaler *scaler = &p->scaler[scaler_id];
+
+ // bilinear scaling is a free no-op thanks to GPU sampling
+ if (strcmp(conf->kernel.name, "bilinear") != 0) {
+ GLSLF("// upscaling plane %d\n", n);
+ pass_sample(p, img[n], scaler, conf, 1.0, p->texture_w, p->texture_h);
+ finish_pass_tex(p, &p->scale_tex[n], p->texture_w, p->texture_h);
+ img[n] = image_wrap(p->scale_tex[n], img[n].type, img[n].components);
+ }
+
+ // Run any post-scaling hooks
+ img[n] = pass_hook(p, name, img[n], NULL);
+ }
+
+ // All planes are of the same size and properly aligned at this point
+ pass_describe(p, "combining planes");
+ int coord = 0;
+ for (int i = 0; i < 4; i++) {
+ if (img[i].type != PLANE_NONE)
+ copy_image(p, &coord, img[i]);
+ }
+ p->components = coord;
+}
+
+// Utility function that simply binds a texture and reads from it, without any
+// transformations.
+static void pass_read_tex(struct gl_video *p, struct ra_tex *tex)
+{
+ struct image img = image_wrap(tex, PLANE_RGB, p->components);
+ copy_image(p, &(int){0}, img);
+}
+
+// yuv conversion, and any other conversions before main up/down-scaling
+static void pass_convert_yuv(struct gl_video *p)
+{
+ struct gl_shader_cache *sc = p->sc;
+
+ struct mp_csp_params cparams = MP_CSP_PARAMS_DEFAULTS;
+ cparams.gray = p->is_gray;
+ cparams.is_float = p->ra_format.component_type == RA_CTYPE_FLOAT;
+ mp_csp_set_image_params(&cparams, &p->image_params);
+ mp_csp_equalizer_state_get(p->video_eq, &cparams);
+ p->user_gamma = 1.0 / (cparams.gamma * p->opts.gamma);
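+ // (user_gamma is applied later, in pass_draw_to_screen.)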
+
+ pass_describe(p, "color conversion");
+
+ if (p->color_swizzle[0])
+ GLSLF("color = color.%s;\n", p->color_swizzle);
+
+ // Pre-colormatrix input gamma correction
+ if (cparams.color.space == MP_CSP_XYZ)
+ pass_linearize(p->sc, p->image_params.color.gamma);
+
+ // We always explicitly normalize the range in pass_read_video
+ cparams.input_bits = cparams.texture_bits = 0;
+
+ // Conversion to RGB. For RGB itself, this still applies e.g. brightness
+ // and contrast controls, or expansion of e.g. LSB-packed 10 bit data.
+ struct mp_cmat m = {{{0}}};
+ mp_get_csp_matrix(&cparams, &m);
+ gl_sc_uniform_mat3(sc, "colormatrix", true, &m.m[0][0]);
+ gl_sc_uniform_vec3(sc, "colormatrix_c", m.c);
+
+ GLSL(color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;)
+
+ if (cparams.color.space == MP_CSP_XYZ) {
+ pass_delinearize(p->sc, p->image_params.color.gamma);
+ // mp_get_csp_matrix implicitly converts XYZ to DCI-P3
+ p->image_params.color.space = MP_CSP_RGB;
+ p->image_params.color.primaries = MP_CSP_PRIM_DCI_P3;
+ }
+
+ if (p->image_params.color.space == MP_CSP_BT_2020_C) {
+ // Conversion for C'rcY'cC'bc via the BT.2020 CL system:
+ // C'bc = (B'-Y'c) / 1.9404 | C'bc <= 0
+ // = (B'-Y'c) / 1.5816 | C'bc > 0
+ //
+ // C'rc = (R'-Y'c) / 1.7184 | C'rc <= 0
+ // = (R'-Y'c) / 0.9936 | C'rc > 0
+ //
+ // as per the BT.2020 specification, table 4. This is a non-linear
+ // transformation because (constant) luminance receives non-equal
+ // contributions from the three different channels.
+ GLSLF("// constant luminance conversion \n"
+ "color.br = color.br * mix(vec2(1.5816, 0.9936), \n"
+ " vec2(1.9404, 1.7184), \n"
+ " %s(lessThanEqual(color.br, vec2(0))))\n"
+ " + color.gg; \n",
+ gl_sc_bvec(p->sc, 2));
+ // Expand channels to camera-linear light. This shader currently just
+ // assumes everything uses the BT.2020 12-bit gamma function, since the
+ // difference between 10 and 12-bit is negligible for anything other
+ // than 12-bit content.
+ GLSLF("color.rgb = mix(color.rgb * vec3(1.0/4.5), \n"
+ " pow((color.rgb + vec3(0.0993))*vec3(1.0/1.0993), \n"
+ " vec3(1.0/0.45)), \n"
+ " %s(lessThanEqual(vec3(0.08145), color.rgb))); \n",
+ gl_sc_bvec(p->sc, 3));
+ // Calculate the green channel from the expanded RYcB
+ // The BT.2020 specification says Yc = 0.2627*R + 0.6780*G + 0.0593*B
+ GLSL(color.g = (color.g - 0.2627*color.r - 0.0593*color.b)*1.0/0.6780;)
+ // Recompress to receive the R'G'B' result, same as other systems
+ GLSLF("color.rgb = mix(color.rgb * vec3(4.5), \n"
+ " vec3(1.0993) * pow(color.rgb, vec3(0.45)) - vec3(0.0993), \n"
+ " %s(lessThanEqual(vec3(0.0181), color.rgb))); \n",
+ gl_sc_bvec(p->sc, 3));
+ }
+
+ p->components = 3;
+ if (!p->has_alpha || p->opts.alpha_mode == ALPHA_NO) {
+ GLSL(color.a = 1.0;)
+ } else if (p->image_params.alpha == MP_ALPHA_PREMUL) {
+ p->components = 4;
+ } else {
+ p->components = 4;
+ GLSL(color = vec4(color.rgb * color.a, color.a);) // straight -> premul
+ }
+}
+
+static void get_scale_factors(struct gl_video *p, bool transpose_rot, double xy[2])
+{
+ double target_w = p->src_rect.x1 - p->src_rect.x0;
+ double target_h = p->src_rect.y1 - p->src_rect.y0;
+ if (transpose_rot && p->image_params.rotate % 180 == 90)
+ MPSWAP(double, target_w, target_h);
+ xy[0] = (p->dst_rect.x1 - p->dst_rect.x0) / target_w;
+ xy[1] = (p->dst_rect.y1 - p->dst_rect.y0) / target_h;
+}
+
+// Cropping.
+static void compute_src_transform(struct gl_video *p, struct gl_transform *tr)
+{
+ float sx = (p->src_rect.x1 - p->src_rect.x0) / (float)p->texture_w,
+ sy = (p->src_rect.y1 - p->src_rect.y0) / (float)p->texture_h,
+ ox = p->src_rect.x0,
+ oy = p->src_rect.y0;
+ struct gl_transform transform = {{{sx, 0}, {0, sy}}, {ox, oy}};
+
+ gl_transform_trans(p->texture_offset, &transform);
+
+ *tr = transform;
+}
+
+// Takes care of the main scaling and pre/post-conversions
+static void pass_scale_main(struct gl_video *p)
+{
+ // Figure out the main scaler.
+ double xy[2];
+ get_scale_factors(p, true, xy);
+
+ // actual scale factor should be divided by the scale factor of prescaling.
+ xy[0] /= p->texture_offset.m[0][0];
+ xy[1] /= p->texture_offset.m[1][1];
+
+ // The calculation of the scale factor involves 32-bit floats (from
+ // gl_transform), so use a non-strict equality test to tolerate precision loss.
+ bool downscaling = xy[0] < 1.0 - FLT_EPSILON || xy[1] < 1.0 - FLT_EPSILON;
+ bool upscaling = !downscaling && (xy[0] > 1.0 + FLT_EPSILON ||
+ xy[1] > 1.0 + FLT_EPSILON);
+ double scale_factor = 1.0;
+
+ struct scaler *scaler = &p->scaler[SCALER_SCALE];
+ struct scaler_config scaler_conf = p->opts.scaler[SCALER_SCALE];
+ if (p->opts.scaler_resizes_only && !downscaling && !upscaling) {
+ scaler_conf.kernel.name = "bilinear";
+ // For scaler-resizes-only, we round the texture offset to
+ // the nearest integer in order to prevent ugly blurriness
+ // (in exchange for slightly shifting the image by up to half a
+ // subpixel)
+ p->texture_offset.t[0] = roundf(p->texture_offset.t[0]);
+ p->texture_offset.t[1] = roundf(p->texture_offset.t[1]);
+ }
+ if (downscaling && p->opts.scaler[SCALER_DSCALE].kernel.name) {
+ scaler_conf = p->opts.scaler[SCALER_DSCALE];
+ scaler = &p->scaler[SCALER_DSCALE];
+ }
+
+ // When correct-downscaling is requested for an anamorphic clip, only a
+ // single scale factor can be used for both axes. So enable it only when
+ // both axes are downscaled, and use the milder of the two factors to avoid
+ // too much blur on one axis (even if the scale factor ends up sub-optimal
+ // on the other axis). This is better than not respecting correct scaling
+ // at all for anamorphic clips.
+ double f = MPMAX(xy[0], xy[1]);
+ if (p->opts.correct_downscaling && f < 1.0)
+ scale_factor = 1.0 / f;
+
+ // Pre-conversion, like linear light/sigmoidization
+ GLSLF("// scaler pre-conversion\n");
+ bool use_linear = false;
+ if (downscaling) {
+ use_linear = p->opts.linear_downscaling;
+
+ // Linear light downscaling results in nasty artifacts for HDR curves
+ // due to the potentially extreme brightness differences severely
+ // compounding any ringing. So just scale in gamma light instead.
+ if (mp_trc_is_hdr(p->image_params.color.gamma))
+ use_linear = false;
+ } else if (upscaling) {
+ use_linear = p->opts.linear_upscaling || p->opts.sigmoid_upscaling;
+ }
+
+ if (use_linear) {
+ p->use_linear = true;
+ pass_linearize(p->sc, p->image_params.color.gamma);
+ pass_opt_hook_point(p, "LINEAR", NULL);
+ }
+
+ bool use_sigmoid = use_linear && p->opts.sigmoid_upscaling && upscaling;
+ float sig_center, sig_slope, sig_offset, sig_scale;
+ if (use_sigmoid) {
+ // Coefficients for the sigmoidal transform are taken from the
+ // formula here: http://www.imagemagick.org/Usage/color_mods/#sigmoidal
+ sig_center = p->opts.sigmoid_center;
+ sig_slope = p->opts.sigmoid_slope;
+ // This function needs to go through (0,0) and (1,1) so we compute the
+ // values at 1 and 0, and then scale/shift them, respectively.
+ sig_offset = 1.0/(1+expf(sig_slope * sig_center));
+ sig_scale = 1.0/(1+expf(sig_slope * (sig_center-1))) - sig_offset;
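+ // With s(x) = 1/(1+exp(slope*(center-x))), this sets offset = s(0) and
+ // scale = s(1) - s(0), so the normalized curve (s(x)-offset)/scale passes
+ // through (0,0) and (1,1). The shader below applies the *inverse* of that
+ // curve; the forward curve is re-applied in the post-conversion step.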
+ GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
+ GLSLF("color.rgb = %f - log(1.0/(color.rgb * %f + %f) - 1.0) * 1.0/%f;\n",
+ sig_center, sig_scale, sig_offset, sig_slope);
+ pass_opt_hook_point(p, "SIGMOID", NULL);
+ }
+
+ pass_opt_hook_point(p, "PREKERNEL", NULL);
+
+ int vp_w = p->dst_rect.x1 - p->dst_rect.x0;
+ int vp_h = p->dst_rect.y1 - p->dst_rect.y0;
+ struct gl_transform transform;
+ compute_src_transform(p, &transform);
+
+ GLSLF("// main scaling\n");
+ finish_pass_tex(p, &p->indirect_tex, p->texture_w, p->texture_h);
+ struct image src = image_wrap(p->indirect_tex, PLANE_RGB, p->components);
+ gl_transform_trans(transform, &src.transform);
+ pass_sample(p, src, scaler, &scaler_conf, scale_factor, vp_w, vp_h);
+
+ // Changes the texture size to display size after main scaler.
+ p->texture_w = vp_w;
+ p->texture_h = vp_h;
+
+ pass_opt_hook_point(p, "POSTKERNEL", NULL);
+
+ GLSLF("// scaler post-conversion\n");
+ if (use_sigmoid) {
+ // Inverse of the transformation above
+ GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
+ GLSLF("color.rgb = (1.0/(1.0 + exp(%f * (%f - color.rgb))) - %f) * 1.0/%f;\n",
+ sig_slope, sig_center, sig_offset, sig_scale);
+ }
+}
+
+// Adapts the colors to the right output color space. (Final pass during
+// rendering)
+// If OSD is true, ignore any changes that may have been made to the video
+// by previous passes (i.e. linear scaling)
+static void pass_colormanage(struct gl_video *p, struct mp_colorspace src,
+ struct mp_colorspace fbo_csp, int flags, bool osd)
+{
+ struct ra *ra = p->ra;
+
+ // Configure the destination according to the FBO color space,
+ // unless specific transfer function, primaries or target peak
+ // is set. If values are set to _AUTO, the most likely intended
+ // values are guesstimated later in this function.
+ struct mp_colorspace dst = {
+ .gamma = p->opts.target_trc == MP_CSP_TRC_AUTO ?
+ fbo_csp.gamma : p->opts.target_trc,
+ .primaries = p->opts.target_prim == MP_CSP_PRIM_AUTO ?
+ fbo_csp.primaries : p->opts.target_prim,
+ .light = MP_CSP_LIGHT_DISPLAY,
+ .hdr.max_luma = !p->opts.target_peak ?
+ fbo_csp.hdr.max_luma : p->opts.target_peak,
+ };
+
+ if (!p->colorspace_override_warned &&
+ ((fbo_csp.gamma && dst.gamma != fbo_csp.gamma) ||
+ (fbo_csp.primaries && dst.primaries != fbo_csp.primaries)))
+ {
+ MP_WARN(p, "One or more colorspace value is being overridden "
+ "by user while the FBO provides colorspace information: "
+ "transfer function: (dst: %s, fbo: %s), "
+ "primaries: (dst: %s, fbo: %s). "
+ "Rendering can lead to incorrect results!\n",
+ m_opt_choice_str(mp_csp_trc_names, dst.gamma),
+ m_opt_choice_str(mp_csp_trc_names, fbo_csp.gamma),
+ m_opt_choice_str(mp_csp_prim_names, dst.primaries),
+ m_opt_choice_str(mp_csp_prim_names, fbo_csp.primaries));
+ p->colorspace_override_warned = true;
+ }
+
+ if (dst.gamma == MP_CSP_TRC_HLG)
+ dst.light = MP_CSP_LIGHT_SCENE_HLG;
+
+ if (p->use_lut_3d && (flags & RENDER_SCREEN_COLOR)) {
+ // The 3DLUT is always generated against the video's original source
+ // space, *not* the reference space. (To avoid having to regenerate
+ // the 3DLUT for the OSD on every frame)
+ enum mp_csp_prim prim_orig = p->image_params.color.primaries;
+ enum mp_csp_trc trc_orig = p->image_params.color.gamma;
+
+ // One exception: HDR is not supported by LittleCMS due to technical
+ // limitations, so we use a gamma 2.2 input curve here instead.
+ // We could pick any value we want here, the difference is just coding
+ // efficiency.
+ if (mp_trc_is_hdr(trc_orig))
+ trc_orig = MP_CSP_TRC_GAMMA22;
+
+ if (gl_video_get_lut3d(p, prim_orig, trc_orig)) {
+ dst.primaries = prim_orig;
+ dst.gamma = trc_orig;
+ assert(dst.primaries && dst.gamma);
+ }
+ }
+
+ if (dst.primaries == MP_CSP_PRIM_AUTO) {
+ // The vast majority of people are on sRGB or BT.709 displays, so pick
+ // this as the default output color space.
+ dst.primaries = MP_CSP_PRIM_BT_709;
+
+ if (src.primaries == MP_CSP_PRIM_BT_601_525 ||
+ src.primaries == MP_CSP_PRIM_BT_601_625)
+ {
+ // Since we auto-pick BT.601 and BT.709 based on the dimensions,
+ // combined with the fact that they're very similar to begin with,
+ // and to avoid confusing the average user, just don't adapt BT.601
+ // content automatically at all.
+ dst.primaries = src.primaries;
+ }
+ }
+
+ if (dst.gamma == MP_CSP_TRC_AUTO) {
+ // Most people seem to complain when the image is darker or brighter
+ // than what they're "used to", so just avoid changing the gamma
+ // altogether by default. The only exceptions to this rule apply to
+ // very unusual TRCs, which even hardcore technoluddites would probably
+ // not enjoy viewing unaltered.
+ dst.gamma = src.gamma;
+
+ // Avoid outputting linear light or HDR content "by default". For these
+ // just pick gamma 2.2 as a default, since it's a good estimate for
+ // the response of typical displays
+ if (dst.gamma == MP_CSP_TRC_LINEAR || mp_trc_is_hdr(dst.gamma))
+ dst.gamma = MP_CSP_TRC_GAMMA22;
+ }
+
+ // If there's no specific signal peak known for the output display, infer
+ // it from the chosen transfer function. Also normalize the src peak, in
+ // case it was unknown
+ if (!dst.hdr.max_luma)
+ dst.hdr.max_luma = mp_trc_nom_peak(dst.gamma) * MP_REF_WHITE;
+ if (!src.hdr.max_luma)
+ src.hdr.max_luma = mp_trc_nom_peak(src.gamma) * MP_REF_WHITE;
+
+ // Whitelist supported modes
+ switch (p->opts.tone_map.curve) {
+ case TONE_MAPPING_AUTO:
+ case TONE_MAPPING_CLIP:
+ case TONE_MAPPING_MOBIUS:
+ case TONE_MAPPING_REINHARD:
+ case TONE_MAPPING_HABLE:
+ case TONE_MAPPING_GAMMA:
+ case TONE_MAPPING_LINEAR:
+ case TONE_MAPPING_BT_2390:
+ break;
+ default:
+ MP_WARN(p, "Tone mapping curve unsupported by vo_gpu, falling back.\n");
+ p->opts.tone_map.curve = TONE_MAPPING_AUTO;
+ break;
+ }
+
+ switch (p->opts.tone_map.gamut_mode) {
+ case GAMUT_AUTO:
+ case GAMUT_WARN:
+ case GAMUT_CLIP:
+ case GAMUT_DESATURATE:
+ break;
+ default:
+ MP_WARN(p, "Gamut mapping mode unsupported by vo_gpu, falling back.\n");
+ p->opts.tone_map.gamut_mode = GAMUT_AUTO;
+ break;
+ }
+
+ struct gl_tone_map_opts tone_map = p->opts.tone_map;
+ bool detect_peak = tone_map.compute_peak >= 0 && mp_trc_is_hdr(src.gamma)
+ && src.hdr.max_luma > dst.hdr.max_luma;
+
+ if (detect_peak && !p->hdr_peak_ssbo) {
+ struct {
+ float average[2];
+ int32_t frame_sum;
+ uint32_t frame_max;
+ uint32_t counter;
+ } peak_ssbo = {0};
+
+ struct ra_buf_params params = {
+ .type = RA_BUF_TYPE_SHADER_STORAGE,
+ .size = sizeof(peak_ssbo),
+ .initial_data = &peak_ssbo,
+ };
+
+ p->hdr_peak_ssbo = ra_buf_create(ra, &params);
+ if (!p->hdr_peak_ssbo) {
+ MP_WARN(p, "Failed to create HDR peak detection SSBO, disabling.\n");
+ tone_map.compute_peak = p->opts.tone_map.compute_peak = -1;
+ detect_peak = false;
+ }
+ }
+
+ if (detect_peak) {
+ pass_describe(p, "detect HDR peak");
+ pass_is_compute(p, 8, 8, true); // 8x8 is good for performance
+ gl_sc_ssbo(p->sc, "PeakDetect", p->hdr_peak_ssbo,
+ "vec2 average;"
+ "int frame_sum;"
+ "uint frame_max;"
+ "uint counter;"
+ );
+ } else {
+ tone_map.compute_peak = -1;
+ }
+
+ // Adapt from src to dst as necessary
+ pass_color_map(p->sc, p->use_linear && !osd, src, dst, &tone_map);
+
+ if (p->use_lut_3d && (flags & RENDER_SCREEN_COLOR)) {
+ gl_sc_uniform_texture(p->sc, "lut_3d", p->lut_3d_texture);
+ GLSL(vec3 cpos;)
+ for (int i = 0; i < 3; i++)
+ GLSLF("cpos[%d] = LUT_POS(color[%d], %d.0);\n", i, i, p->lut_3d_size[i]);
+ GLSL(color.rgb = tex3D(lut_3d, cpos).rgb;)
+ }
+}
+
+void gl_video_set_fb_depth(struct gl_video *p, int fb_depth)
+{
+ p->fb_depth = fb_depth;
+}
+
+static void pass_dither(struct gl_video *p)
+{
+ // Assume 8 bits per component if unknown.
+ int dst_depth = p->fb_depth > 0 ? p->fb_depth : 8;
+ if (p->opts.dither_depth > 0)
+ dst_depth = p->opts.dither_depth;
+
+ if (p->opts.dither_depth < 0 || p->opts.dither_algo == DITHER_NONE)
+ return;
+
+ if (p->opts.dither_algo == DITHER_ERROR_DIFFUSION) {
+ const struct error_diffusion_kernel *kernel =
+ mp_find_error_diffusion_kernel(p->opts.error_diffusion);
+ int o_w = p->dst_rect.x1 - p->dst_rect.x0,
+ o_h = p->dst_rect.y1 - p->dst_rect.y0;
+
+ int shmem_req = mp_ef_compute_shared_memory_size(kernel, o_h);
+ if (shmem_req > p->ra->max_shmem) {
+ MP_WARN(p, "Fallback to dither=fruit because there is no enough "
+ "shared memory (%d/%d).\n",
+ shmem_req, (int)p->ra->max_shmem);
+ p->opts.dither_algo = DITHER_FRUIT;
+ } else {
+ finish_pass_tex(p, &p->error_diffusion_tex[0], o_w, o_h);
+
+ struct image img = image_wrap(p->error_diffusion_tex[0], PLANE_RGB, p->components);
+
+ // Ensure the block size doesn't exceed the maximum of the
+ // implementation.
+ int block_size = MPMIN(p->ra->max_compute_group_threads, o_h);
+
+ pass_describe(p, "dither=error-diffusion (kernel=%s, depth=%d)",
+ kernel->name, dst_depth);
+
+ p->pass_compute = (struct compute_info) {
+ .active = true,
+ .threads_w = block_size,
+ .threads_h = 1,
+ .directly_writes = true
+ };
+
+ int tex_id = pass_bind(p, img);
+
+ pass_error_diffusion(p->sc, kernel, tex_id, o_w, o_h,
+ dst_depth, block_size);
+
+ finish_pass_tex(p, &p->error_diffusion_tex[1], o_w, o_h);
+
+ img = image_wrap(p->error_diffusion_tex[1], PLANE_RGB, p->components);
+ copy_image(p, &(int){0}, img);
+
+ return;
+ }
+ }
+
+ if (!p->dither_texture) {
+ MP_VERBOSE(p, "Dither to %d.\n", dst_depth);
+
+ int tex_size = 0;
+ void *tex_data = NULL;
+ const struct ra_format *fmt = NULL;
+ void *temp = NULL;
+
+ if (p->opts.dither_algo == DITHER_FRUIT) {
+ int sizeb = p->opts.dither_size;
+ int size = 1 << sizeb;
+
+ if (p->last_dither_matrix_size != size) {
+ p->last_dither_matrix = talloc_realloc(p, p->last_dither_matrix,
+ float, size * size);
+ mp_make_fruit_dither_matrix(p->last_dither_matrix, sizeb);
+ p->last_dither_matrix_size = size;
+ }
+
+ // Prefer an R16 texture since it provides higher precision.
+ fmt = ra_find_unorm_format(p->ra, 2, 1);
+ if (!fmt)
+ fmt = ra_find_float16_format(p->ra, 1);
+ if (fmt) {
+ tex_size = size;
+ tex_data = p->last_dither_matrix;
+ if (fmt->ctype == RA_CTYPE_UNORM) {
+ uint16_t *t = temp = talloc_array(NULL, uint16_t, size * size);
+ for (int n = 0; n < size * size; n++)
+ t[n] = p->last_dither_matrix[n] * UINT16_MAX;
+ tex_data = t;
+ }
+ } else {
+ MP_VERBOSE(p, "GL too old. Falling back to ordered dither.\n");
+ p->opts.dither_algo = DITHER_ORDERED;
+ }
+ }
+
+ if (p->opts.dither_algo == DITHER_ORDERED) {
+ temp = talloc_array(NULL, char, 8 * 8);
+ mp_make_ordered_dither_matrix(temp, 8);
+
+ fmt = ra_find_unorm_format(p->ra, 1, 1);
+ tex_size = 8;
+ tex_data = temp;
+ }
+
+ struct ra_tex_params params = {
+ .dimensions = 2,
+ .w = tex_size,
+ .h = tex_size,
+ .d = 1,
+ .format = fmt,
+ .render_src = true,
+ .src_repeat = true,
+ .initial_data = tex_data,
+ };
+ p->dither_texture = ra_tex_create(p->ra, &params);
+
+ debug_check_gl(p, "dither setup");
+
+ talloc_free(temp);
+
+ if (!p->dither_texture)
+ return;
+ }
+
+ GLSLF("// dithering\n");
+
+ // This defines how many bits are considered significant for output on
+ // screen. The superfluous bits will be used for rounding according to the
+ // dither matrix. The precision of the source implicitly decides how many
+ // dither patterns can be visible.
+ int dither_quantization = (1 << dst_depth) - 1;
+ int dither_size = p->dither_texture->params.w;
+
+ gl_sc_uniform_texture(p->sc, "dither", p->dither_texture);
+
+ GLSLF("vec2 dither_pos = gl_FragCoord.xy * 1.0/%d.0;\n", dither_size);
+
+ if (p->opts.temporal_dither) {
+ int phase = (p->frames_rendered / p->opts.temporal_dither_period) % 8u;
+ float r = phase * (M_PI / 2); // rotate
+ float m = phase < 4 ? 1 : -1; // mirror
+
+ float matrix[2][2] = {{cos(r), -sin(r) },
+ {sin(r) * m, cos(r) * m}};
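+ // Phases 0-3 are plain rotations; phases 4-7 add a mirroring, so the
+ // dither matrix cycles through 8 distinct orientations over time.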
+ gl_sc_uniform_dynamic(p->sc);
+ gl_sc_uniform_mat2(p->sc, "dither_trafo", true, &matrix[0][0]);
+
+ GLSL(dither_pos = dither_trafo * dither_pos;)
+ }
+
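+ // Note: dither_value in [0,1) shifts the per-pixel rounding threshold, so
+ // floor() acts as unbiased rounding on average. The 0.5/(size*size) term
+ // compensates for the matrix values being quantized to multiples of
+ // 1/(size*size) (assuming stored values k/(size*size)), centering their
+ // mean at exactly 0.5.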
+ GLSL(float dither_value = texture(dither, dither_pos).r;)
+ GLSLF("color = floor(color * %d.0 + dither_value + 0.5 / %d.0) * 1.0/%d.0;\n",
+ dither_quantization, dither_size * dither_size, dither_quantization);
+}
+
+ // Draws the OSD in scene-referred colors. If cms is true, subtitles are
+// instead adapted to the display's gamut.
+static void pass_draw_osd(struct gl_video *p, int osd_flags, int frame_flags,
+ double pts, struct mp_osd_res rect, struct ra_fbo fbo,
+ bool cms)
+{
+ if (frame_flags & RENDER_FRAME_VF_SUBS)
+ osd_flags |= OSD_DRAW_SUB_FILTER;
+
+ if ((osd_flags & OSD_DRAW_SUB_ONLY) && (osd_flags & OSD_DRAW_OSD_ONLY))
+ return;
+
+ mpgl_osd_generate(p->osd, rect, pts, p->image_params.stereo3d, osd_flags);
+
+ timer_pool_start(p->osd_timer);
+ for (int n = 0; n < MAX_OSD_PARTS; n++) {
+ // (This returns false if this part is empty with nothing to draw.)
+ if (!mpgl_osd_draw_prepare(p->osd, n, p->sc))
+ continue;
+ // When subtitles need to be color managed, assume they're in sRGB
+ // (for lack of anything saner to do)
+ if (cms) {
+ static const struct mp_colorspace csp_srgb = {
+ .primaries = MP_CSP_PRIM_BT_709,
+ .gamma = MP_CSP_TRC_SRGB,
+ .light = MP_CSP_LIGHT_DISPLAY,
+ };
+
+ pass_colormanage(p, csp_srgb, fbo.color_space, frame_flags, true);
+ }
+ mpgl_osd_draw_finish(p->osd, n, p->sc, fbo);
+ }
+
+ timer_pool_stop(p->osd_timer);
+ pass_describe(p, "drawing osd");
+ pass_record(p, timer_pool_measure(p->osd_timer));
+}
+
+static float chroma_realign(int size, int pixel)
+{
+ return size / (float)chroma_upsize(size, pixel);
+}
+
+// Minimal rendering code path, for GLES or OpenGL 2.1 without proper FBOs.
+static void pass_render_frame_dumb(struct gl_video *p)
+{
+ struct image img[4];
+ struct gl_transform off[4];
+ pass_get_images(p, &p->image, img, off);
+
+ struct gl_transform transform;
+ compute_src_transform(p, &transform);
+
+ int index = 0;
+ for (int i = 0; i < p->plane_count; i++) {
+ int cw = img[i].type == PLANE_CHROMA ? p->ra_format.chroma_w : 1;
+ int ch = img[i].type == PLANE_CHROMA ? p->ra_format.chroma_h : 1;
+ if (p->image_params.rotate % 180 == 90)
+ MPSWAP(int, cw, ch);
+
+ struct gl_transform t = transform;
+ t.m[0][0] *= chroma_realign(p->texture_w, cw);
+ t.m[1][1] *= chroma_realign(p->texture_h, ch);
+
+ t.t[0] /= cw;
+ t.t[1] /= ch;
+
+ t.t[0] += off[i].t[0];
+ t.t[1] += off[i].t[1];
+
+ gl_transform_trans(img[i].transform, &t);
+ img[i].transform = t;
+
+ copy_image(p, &index, img[i]);
+ }
+
+ pass_convert_yuv(p);
+}
+
+// The main rendering function, takes care of everything up to and including
+// upscaling. p->image is rendered.
+// flags: bit set of RENDER_FRAME_* flags
+static bool pass_render_frame(struct gl_video *p, struct mp_image *mpi,
+ uint64_t id, int flags)
+{
+ // initialize the texture parameters and temporary variables
+ p->texture_w = p->image_params.w;
+ p->texture_h = p->image_params.h;
+ p->texture_offset = identity_trans;
+ p->components = 0;
+ p->num_saved_imgs = 0;
+ p->idx_hook_textures = 0;
+ p->use_linear = false;
+
+ // try uploading the frame
+ if (!pass_upload_image(p, mpi, id))
+ return false;
+
+ if (p->image_params.rotate % 180 == 90)
+ MPSWAP(int, p->texture_w, p->texture_h);
+
+ if (p->dumb_mode)
+ return true;
+
+ pass_read_video(p);
+ pass_opt_hook_point(p, "NATIVE", &p->texture_offset);
+ pass_convert_yuv(p);
+ pass_opt_hook_point(p, "MAINPRESUB", &p->texture_offset);
+
+ // For subtitles
+ double vpts = p->image.mpi->pts;
+ if (vpts == MP_NOPTS_VALUE)
+ vpts = p->osd_pts;
+
+ if (p->osd && p->opts.blend_subs == BLEND_SUBS_VIDEO &&
+ (flags & RENDER_FRAME_SUBS))
+ {
+ double scale[2];
+ get_scale_factors(p, false, scale);
+ struct mp_osd_res rect = {
+ .w = p->texture_w, .h = p->texture_h,
+ .display_par = scale[1] / scale[0], // counter compensate scaling
+ };
+ finish_pass_tex(p, &p->blend_subs_tex, rect.w, rect.h);
+ struct ra_fbo fbo = { p->blend_subs_tex };
+ pass_draw_osd(p, OSD_DRAW_SUB_ONLY, flags, vpts, rect, fbo, false);
+ pass_read_tex(p, p->blend_subs_tex);
+ pass_describe(p, "blend subs video");
+ }
+ pass_opt_hook_point(p, "MAIN", &p->texture_offset);
+
+ pass_scale_main(p);
+
+ int vp_w = p->dst_rect.x1 - p->dst_rect.x0,
+ vp_h = p->dst_rect.y1 - p->dst_rect.y0;
+ if (p->osd && p->opts.blend_subs == BLEND_SUBS_YES &&
+ (flags & RENDER_FRAME_SUBS))
+ {
+ // Recreate the real video size from the src/dst rects
+ struct mp_osd_res rect = {
+ .w = vp_w, .h = vp_h,
+ .ml = -p->src_rect.x0, .mr = p->src_rect.x1 - p->image_params.w,
+ .mt = -p->src_rect.y0, .mb = p->src_rect.y1 - p->image_params.h,
+ .display_par = 1.0,
+ };
+ // Adjust margins for scale
+ double scale[2];
+ get_scale_factors(p, true, scale);
+ rect.ml *= scale[0]; rect.mr *= scale[0];
+ rect.mt *= scale[1]; rect.mb *= scale[1];
+ // We should always blend subtitles in non-linear light
+ if (p->use_linear) {
+ pass_delinearize(p->sc, p->image_params.color.gamma);
+ p->use_linear = false;
+ }
+ finish_pass_tex(p, &p->blend_subs_tex, p->texture_w, p->texture_h);
+ struct ra_fbo fbo = { p->blend_subs_tex };
+ pass_draw_osd(p, OSD_DRAW_SUB_ONLY, flags, vpts, rect, fbo, false);
+ pass_read_tex(p, p->blend_subs_tex);
+ pass_describe(p, "blend subs");
+ }
+
+ pass_opt_hook_point(p, "SCALED", NULL);
+
+ return true;
+}
+
+static void pass_draw_to_screen(struct gl_video *p, struct ra_fbo fbo, int flags)
+{
+ if (p->dumb_mode)
+ pass_render_frame_dumb(p);
+
+ // Adjust the overall gamma before drawing to screen
+ if (p->user_gamma != 1) {
+ gl_sc_uniform_f(p->sc, "user_gamma", p->user_gamma);
+ GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
+ GLSL(color.rgb = pow(color.rgb, vec3(user_gamma));)
+ }
+
+ pass_colormanage(p, p->image_params.color, fbo.color_space, flags, false);
+
+ // Since finish_pass_fbo doesn't work with compute shaders, and neither
+ // does the checkerboard/dither code, we may need an indirection via
+ // p->screen_tex here.
+ if (p->pass_compute.active) {
+ int o_w = p->dst_rect.x1 - p->dst_rect.x0,
+ o_h = p->dst_rect.y1 - p->dst_rect.y0;
+ finish_pass_tex(p, &p->screen_tex, o_w, o_h);
+ struct image tmp = image_wrap(p->screen_tex, PLANE_RGB, p->components);
+ copy_image(p, &(int){0}, tmp);
+ }
+
+ if (p->has_alpha) {
+ if (p->opts.alpha_mode == ALPHA_BLEND_TILES) {
+ // Draw checkerboard pattern to indicate transparency
+ GLSLF("// transparency checkerboard\n");
+ GLSL(bvec2 tile = lessThan(fract(gl_FragCoord.xy * 1.0/32.0), vec2(0.5));)
+ GLSL(vec3 background = vec3(tile.x == tile.y ? 0.93 : 0.87);)
+ GLSL(color.rgb += background.rgb * (1.0 - color.a);)
+ GLSL(color.a = 1.0;)
+ } else if (p->opts.alpha_mode == ALPHA_BLEND) {
+ // Blend into background color (usually black)
+ struct m_color c = p->opts.background;
+ GLSLF("vec4 background = vec4(%f, %f, %f, %f);\n",
+ c.r / 255.0, c.g / 255.0, c.b / 255.0, c.a / 255.0);
+ GLSL(color.rgb += background.rgb * (1.0 - color.a);)
+ GLSL(color.a = background.a;)
+ }
+ }
+
+ pass_opt_hook_point(p, "OUTPUT", NULL);
+
+ if (flags & RENDER_SCREEN_COLOR)
+ pass_dither(p);
+ pass_describe(p, "output to screen");
+ finish_pass_fbo(p, fbo, false, &p->dst_rect);
+}
+
+// flags: bit set of RENDER_FRAME_* flags
+static bool update_surface(struct gl_video *p, struct mp_image *mpi,
+ uint64_t id, struct surface *surf, int flags)
+{
+ int vp_w = p->dst_rect.x1 - p->dst_rect.x0,
+ vp_h = p->dst_rect.y1 - p->dst_rect.y0;
+
+ pass_info_reset(p, false);
+ if (!pass_render_frame(p, mpi, id, flags))
+ return false;
+
+ // Frame blending should always be done in linear light to preserve the
+ // overall brightness; otherwise this will result in flashing dark frames,
+ // because mixing in compressed light artificially darkens the results.
+ if (!p->use_linear) {
+ p->use_linear = true;
+ pass_linearize(p->sc, p->image_params.color.gamma);
+ }
+
+ finish_pass_tex(p, &surf->tex, vp_w, vp_h);
+ surf->id = id;
+ surf->pts = mpi->pts;
+ return true;
+}
+
+ // Draws an interpolated frame to fbo, based on the frame timing in t
+// flags: bit set of RENDER_FRAME_* flags
+static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t,
+ struct ra_fbo fbo, int flags)
+{
+ bool is_new = false;
+
+ // Reset the queue completely if this is a still image, to avoid any
+ // interpolation artifacts from surrounding frames when unpausing or
+ // framestepping
+ if (t->still)
+ gl_video_reset_surfaces(p);
+
+ // First of all, figure out if we have a frame available at all, and draw
+ // it manually + reset the queue if not
+ if (p->surfaces[p->surface_now].id == 0) {
+ struct surface *now = &p->surfaces[p->surface_now];
+ if (!update_surface(p, t->current, t->frame_id, now, flags))
+ return;
+ p->surface_idx = p->surface_now;
+ is_new = true;
+ }
+
+ // Find the right frame for this instant
+ if (t->current) {
+ int next = surface_wrap(p->surface_now + 1);
+ while (p->surfaces[next].id &&
+ p->surfaces[next].id > p->surfaces[p->surface_now].id &&
+ p->surfaces[p->surface_now].id < t->frame_id)
+ {
+ p->surface_now = next;
+ next = surface_wrap(next + 1);
+ }
+ }
+
+ // Figure out the queue size. For illustration, a filter radius of 2 would
+ // look like this: _ A [B] C D _
+ // A is surface_bse, B is surface_now, C is surface_now+1 and D is
+ // surface_end.
+ struct scaler *tscale = &p->scaler[SCALER_TSCALE];
+ reinit_scaler(p, tscale, &p->opts.scaler[SCALER_TSCALE], 1, tscale_sizes);
+ bool oversample = strcmp(tscale->conf.kernel.name, "oversample") == 0;
+ bool linear = strcmp(tscale->conf.kernel.name, "linear") == 0;
+ int size;
+
+ if (oversample || linear) {
+ size = 2;
+ } else {
+ assert(tscale->kernel && !tscale->kernel->polar);
+ size = ceil(tscale->kernel->size);
+ }
+
+ int radius = size/2;
+ int surface_now = p->surface_now;
+ int surface_bse = surface_wrap(surface_now - (radius-1));
+ int surface_end = surface_wrap(surface_now + radius);
+ assert(surface_wrap(surface_bse + size-1) == surface_end);
+
+ // Render new frames while there's room in the queue. Note that technically,
+ // this should be done before the step where we find the right frame, but
+ // it only barely matters at the very beginning of playback, and this way
+ // makes the code much more linear.
+ int surface_dst = surface_wrap(p->surface_idx + 1);
+ for (int i = 0; i < t->num_frames; i++) {
+ // Avoid overwriting data we might still need
+ if (surface_dst == surface_bse - 1)
+ break;
+
+ struct mp_image *f = t->frames[i];
+ uint64_t f_id = t->frame_id + i;
+ if (!mp_image_params_equal(&f->params, &p->real_image_params))
+ continue;
+
+ if (f_id > p->surfaces[p->surface_idx].id) {
+ struct surface *dst = &p->surfaces[surface_dst];
+ if (!update_surface(p, f, f_id, dst, flags))
+ return;
+ p->surface_idx = surface_dst;
+ surface_dst = surface_wrap(surface_dst + 1);
+ is_new = true;
+ }
+ }
+
+ // Figure out whether the queue is "valid". A queue is invalid if the
+ // frames' PTS values are not monotonically increasing; in that case, avoid
+ // blending incorrect data and just draw the latest frame as-is.
+ // Possible causes for failure of this condition include seeks, pausing,
+ // end of playback or start of playback.
+ bool valid = true;
+ for (int i = surface_bse, ii; valid && i != surface_end; i = ii) {
+ ii = surface_wrap(i + 1);
+ if (p->surfaces[i].id == 0 || p->surfaces[ii].id == 0) {
+ valid = false;
+ } else if (p->surfaces[ii].id < p->surfaces[i].id) {
+ valid = false;
+ MP_DBG(p, "interpolation queue underrun\n");
+ }
+ }
+
+ // Update OSD PTS to synchronize subtitles with the displayed frame
+ p->osd_pts = p->surfaces[surface_now].pts;
+
+ // Finally, draw the right mix of frames to the screen.
+ if (!is_new)
+ pass_info_reset(p, true);
+ pass_describe(p, "interpolation");
+ if (!valid || t->still) {
+ // surface_now is guaranteed to be valid, so we can safely use it.
+ pass_read_tex(p, p->surfaces[surface_now].tex);
+ p->is_interpolated = false;
+ } else {
+ double mix = t->vsync_offset / t->ideal_frame_duration;
+ // The scaler code always wants the fcoord to be between 0 and 1,
+ // so we try to adjust by using the previous set of N frames instead
+ // (which requires some extra checking to make sure it's valid)
+ if (mix < 0.0) {
+ int prev = surface_wrap(surface_bse - 1);
+ if (p->surfaces[prev].id != 0 &&
+ p->surfaces[prev].id < p->surfaces[surface_bse].id)
+ {
+ mix += 1.0;
+ surface_bse = prev;
+ } else {
+ mix = 0.0; // at least don't blow up, this should only
+ // ever happen at the start of playback
+ }
+ }
+
+ if (oversample) {
+ // Oversample uses the frame area as mix ratio, not the vsync
+ // position itself
+ double vsync_dist = t->vsync_interval / t->ideal_frame_duration,
+ threshold = tscale->conf.kernel.params[0];
+ threshold = isnan(threshold) ? 0.0 : threshold;
+ mix = (1 - mix) / vsync_dist;
+ mix = mix <= 0 + threshold ? 0 : mix;
+ mix = mix >= 1 - threshold ? 1 : mix;
+ mix = 1 - mix;
+ }
+
+ // Blend the frames together
+ if (oversample || linear) {
+ gl_sc_uniform_dynamic(p->sc);
+ gl_sc_uniform_f(p->sc, "inter_coeff", mix);
+ GLSL(color = mix(texture(texture0, texcoord0),
+ texture(texture1, texcoord1),
+ inter_coeff);)
+ } else {
+ gl_sc_uniform_dynamic(p->sc);
+ gl_sc_uniform_f(p->sc, "fcoord", mix);
+ pass_sample_separated_gen(p->sc, tscale, 0, 0);
+ }
+
+ // Load all the required frames
+ for (int i = 0; i < size; i++) {
+ struct image img =
+ image_wrap(p->surfaces[surface_wrap(surface_bse+i)].tex,
+ PLANE_RGB, p->components);
+ // Since the code in pass_sample_separated currently assumes
+ // the textures are bound in-order and starting at 0, we just
+ // assert to make sure this is the case (which it should always be)
+ int id = pass_bind(p, img);
+ assert(id == i);
+ }
+
+ MP_TRACE(p, "inter frame dur: %f vsync: %f, mix: %f\n",
+ t->ideal_frame_duration, t->vsync_interval, mix);
+ p->is_interpolated = true;
+ }
+ pass_draw_to_screen(p, fbo, flags);
+
+ p->frames_drawn += 1;
+}
+
+void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame,
+ struct ra_fbo fbo, int flags)
+{
+ gl_video_update_options(p);
+
+ struct mp_rect target_rc = {0, 0, fbo.tex->params.w, fbo.tex->params.h};
+
+ p->broken_frame = false;
+
+ bool has_frame = !!frame->current;
+
+ struct m_color c = p->clear_color;
+ float clear_color[4] = {c.r / 255.0, c.g / 255.0, c.b / 255.0, c.a / 255.0};
+ p->ra->fns->clear(p->ra, fbo.tex, clear_color, &target_rc);
+
+ if (p->hwdec_overlay) {
+ if (has_frame) {
+ float *color = p->hwdec_overlay->overlay_colorkey;
+ p->ra->fns->clear(p->ra, fbo.tex, color, &p->dst_rect);
+ }
+
+ p->hwdec_overlay->driver->overlay_frame(p->hwdec_overlay, frame->current,
+ &p->src_rect, &p->dst_rect,
+ frame->frame_id != p->image.id);
+
+ if (frame->current)
+ p->osd_pts = frame->current->pts;
+
+ // Disable GL rendering
+ has_frame = false;
+ }
+
+ if (has_frame) {
+ bool interpolate = p->opts.interpolation && frame->display_synced &&
+ (p->frames_drawn || !frame->still);
+ if (interpolate) {
+ double ratio = frame->ideal_frame_duration / frame->vsync_interval;
+ if (fabs(ratio - 1.0) < p->opts.interpolation_threshold)
+ interpolate = false;
+ }
+
+ if (interpolate) {
+ gl_video_interpolate_frame(p, frame, fbo, flags);
+ } else {
+ bool is_new = frame->frame_id != p->image.id;
+
+ // Redrawing a frame might update subtitles.
+ if (frame->still && p->opts.blend_subs)
+ is_new = true;
+
+ if (is_new || !p->output_tex_valid) {
+ p->output_tex_valid = false;
+
+ pass_info_reset(p, !is_new);
+ if (!pass_render_frame(p, frame->current, frame->frame_id, flags))
+ goto done;
+
+ // For the non-interpolation case, we draw to a single "cache"
+ // texture to speed up subsequent re-draws (if any exist)
+ struct ra_fbo dest_fbo = fbo;
+ bool repeats = frame->num_vsyncs > 1 && frame->display_synced;
+ if ((repeats || frame->still) && !p->dumb_mode &&
+ (p->ra->caps & RA_CAP_BLIT) && fbo.tex->params.blit_dst)
+ {
+ // Attempt to use the same format as the destination FBO
+ // if possible. Some RAs use a wrapped dummy format here,
+ // so fall back to the fbo_format in that case.
+ const struct ra_format *fmt = fbo.tex->params.format;
+ if (fmt->dummy_format)
+ fmt = p->fbo_format;
+
+ bool r = ra_tex_resize(p->ra, p->log, &p->output_tex,
+ fbo.tex->params.w, fbo.tex->params.h,
+ fmt);
+ if (r) {
+ dest_fbo = (struct ra_fbo) { p->output_tex };
+ p->output_tex_valid = true;
+ }
+ }
+ pass_draw_to_screen(p, dest_fbo, flags);
+ }
+
+ // "output tex valid" and "output tex needed" are equivalent
+ if (p->output_tex_valid && fbo.tex->params.blit_dst) {
+ pass_info_reset(p, true);
+ pass_describe(p, "redraw cached frame");
+ struct mp_rect src = p->dst_rect;
+ struct mp_rect dst = src;
+ if (fbo.flip) {
+ dst.y0 = fbo.tex->params.h - src.y0;
+ dst.y1 = fbo.tex->params.h - src.y1;
+ }
+ timer_pool_start(p->blit_timer);
+ p->ra->fns->blit(p->ra, fbo.tex, p->output_tex, &dst, &src);
+ timer_pool_stop(p->blit_timer);
+ pass_record(p, timer_pool_measure(p->blit_timer));
+ }
+ }
+ }
+
+done:
+
+ debug_check_gl(p, "after video rendering");
+
+ if (p->osd && (flags & (RENDER_FRAME_SUBS | RENDER_FRAME_OSD))) {
+ // If we haven't actually drawn anything so far, then we technically
+ // need to consider this the start of a new pass. Let's call it a
+ // redraw just because, since it's basically a blank frame anyway
+ if (!has_frame)
+ pass_info_reset(p, true);
+
+ int osd_flags = p->opts.blend_subs ? OSD_DRAW_OSD_ONLY : 0;
+ if (!(flags & RENDER_FRAME_SUBS))
+ osd_flags |= OSD_DRAW_OSD_ONLY;
+ if (!(flags & RENDER_FRAME_OSD))
+ osd_flags |= OSD_DRAW_SUB_ONLY;
+
+ pass_draw_osd(p, osd_flags, flags, p->osd_pts, p->osd_rect, fbo, true);
+ debug_check_gl(p, "after OSD rendering");
+ }
+
+ p->broken_frame |= gl_sc_error_state(p->sc);
+ if (p->broken_frame) {
+ // Make the screen solid blue to make it visually clear that an
+ // error has occurred
+ float color[4] = {0.0, 0.05, 0.5, 1.0};
+ p->ra->fns->clear(p->ra, fbo.tex, color, &target_rc);
+ }
+
+ p->frames_rendered++;
+ pass_report_performance(p);
+}
+
+void gl_video_screenshot(struct gl_video *p, struct vo_frame *frame,
+ struct voctrl_screenshot *args)
+{
+ if (!p->ra->fns->tex_download)
+ return;
+
+ bool ok = false;
+ struct mp_image *res = NULL;
+ struct ra_tex *target = NULL;
+ struct mp_rect old_src = p->src_rect;
+ struct mp_rect old_dst = p->dst_rect;
+ struct mp_osd_res old_osd = p->osd_rect;
+ struct vo_frame *nframe = vo_frame_ref(frame);
+
+ // Disable interpolation and such.
+ nframe->redraw = true;
+ nframe->repeat = false;
+ nframe->still = true;
+ nframe->pts = 0;
+ nframe->duration = -1;
+
+ if (!args->scaled) {
+ int w, h;
+ mp_image_params_get_dsize(&p->image_params, &w, &h);
+ if (w < 1 || h < 1)
+ return;
+
+ int src_w = p->image_params.w;
+ int src_h = p->image_params.h;
+ struct mp_rect src = {0, 0, src_w, src_h};
+ struct mp_rect dst = {0, 0, w, h};
+
+ if (mp_image_crop_valid(&p->image_params))
+ src = p->image_params.crop;
+
+ if (p->image_params.rotate % 180 == 90) {
+ MPSWAP(int, w, h);
+ MPSWAP(int, src_w, src_h);
+ }
+ mp_rect_rotate(&src, src_w, src_h, p->image_params.rotate);
+ mp_rect_rotate(&dst, w, h, p->image_params.rotate);
+
+ struct mp_osd_res osd = {
+ .display_par = 1.0,
+ .w = mp_rect_w(dst),
+ .h = mp_rect_h(dst),
+ };
+ gl_video_resize(p, &src, &dst, &osd);
+ }
+
+ gl_video_reset_surfaces(p);
+
+ struct ra_tex_params params = {
+ .dimensions = 2,
+ .downloadable = true,
+ .w = p->osd_rect.w,
+ .h = p->osd_rect.h,
+ .d = 1,
+ .render_dst = true,
+ };
+
+ params.format = ra_find_unorm_format(p->ra, 1, 4);
+ int mpfmt = IMGFMT_RGB0;
+ if (args->high_bit_depth && p->ra_format.component_bits > 8) {
+ const struct ra_format *fmt = ra_find_unorm_format(p->ra, 2, 4);
+ if (fmt && fmt->renderable) {
+ params.format = fmt;
+ mpfmt = IMGFMT_RGBA64;
+ }
+ }
+
+ if (!params.format || !params.format->renderable)
+ goto done;
+ target = ra_tex_create(p->ra, &params);
+ if (!target)
+ goto done;
+
+ int flags = 0;
+ if (args->subs)
+ flags |= RENDER_FRAME_SUBS;
+ if (args->osd)
+ flags |= RENDER_FRAME_OSD;
+ if (args->scaled)
+ flags |= RENDER_SCREEN_COLOR;
+ gl_video_render_frame(p, nframe, (struct ra_fbo){target}, flags);
+
+ res = mp_image_alloc(mpfmt, params.w, params.h);
+ if (!res)
+ goto done;
+
+ struct ra_tex_download_params download_params = {
+ .tex = target,
+ .dst = res->planes[0],
+ .stride = res->stride[0],
+ };
+ if (!p->ra->fns->tex_download(p->ra, &download_params))
+ goto done;
+
+ if (p->broken_frame)
+ goto done;
+
+ ok = true;
+done:
+ talloc_free(nframe);
+ ra_tex_free(p->ra, &target);
+ gl_video_resize(p, &old_src, &old_dst, &old_osd);
+ gl_video_reset_surfaces(p);
+ if (!ok)
+ TA_FREEP(&res);
+ args->res = res;
+}
+
+// Use this color instead of the global option.
+void gl_video_set_clear_color(struct gl_video *p, struct m_color c)
+{
+ p->force_clear_color = true;
+ p->clear_color = c;
+}
+
+void gl_video_set_osd_pts(struct gl_video *p, double pts)
+{
+ p->osd_pts = pts;
+}
+
+bool gl_video_check_osd_change(struct gl_video *p, struct mp_osd_res *res,
+ double pts)
+{
+ return p->osd ? mpgl_osd_check_change(p->osd, res, pts) : false;
+}
+
+void gl_video_resize(struct gl_video *p,
+ struct mp_rect *src, struct mp_rect *dst,
+ struct mp_osd_res *osd)
+{
+ if (mp_rect_equals(&p->src_rect, src) &&
+ mp_rect_equals(&p->dst_rect, dst) &&
+ osd_res_equals(p->osd_rect, *osd))
+ return;
+
+ p->src_rect = *src;
+ p->dst_rect = *dst;
+ p->osd_rect = *osd;
+
+ gl_video_reset_surfaces(p);
+
+ if (p->osd)
+ mpgl_osd_resize(p->osd, p->osd_rect, p->image_params.stereo3d);
+}
+
+static void frame_perf_data(struct pass_info pass[], struct mp_frame_perf *out)
+{
+ for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
+ if (!pass[i].desc.len)
+ break;
+ out->perf[out->count] = pass[i].perf;
+ strncpy(out->desc[out->count], pass[i].desc.start,
+ sizeof(out->desc[out->count]) - 1);
+ out->desc[out->count][sizeof(out->desc[out->count]) - 1] = '\0';
+ out->count++;
+ }
+}
+
+void gl_video_perfdata(struct gl_video *p, struct voctrl_performance_data *out)
+{
+ *out = (struct voctrl_performance_data){0};
+ frame_perf_data(p->pass_fresh, &out->fresh);
+ frame_perf_data(p->pass_redraw, &out->redraw);
+}
+
+// Returns false on failure.
+static bool pass_upload_image(struct gl_video *p, struct mp_image *mpi, uint64_t id)
+{
+ struct video_image *vimg = &p->image;
+
+ if (vimg->id == id)
+ return true;
+
+ unref_current_image(p);
+
+ mpi = mp_image_new_ref(mpi);
+ if (!mpi)
+ goto error;
+
+ vimg->mpi = mpi;
+ vimg->id = id;
+ p->osd_pts = mpi->pts;
+ p->frames_uploaded++;
+
+ if (p->hwdec_active) {
+ // Hardware decoding
+
+ if (!p->hwdec_mapper)
+ goto error;
+
+ pass_describe(p, "map frame (hwdec)");
+ timer_pool_start(p->upload_timer);
+ bool ok = ra_hwdec_mapper_map(p->hwdec_mapper, vimg->mpi) >= 0;
+ timer_pool_stop(p->upload_timer);
+ pass_record(p, timer_pool_measure(p->upload_timer));
+
+ vimg->hwdec_mapped = true;
+ if (ok) {
+ struct mp_image layout = {0};
+ mp_image_set_params(&layout, &p->image_params);
+ struct ra_tex **tex = p->hwdec_mapper->tex;
+ for (int n = 0; n < p->plane_count; n++) {
+ vimg->planes[n] = (struct texplane){
+ .w = mp_image_plane_w(&layout, n),
+ .h = mp_image_plane_h(&layout, n),
+ .tex = tex[n],
+ };
+ }
+ } else {
+ MP_FATAL(p, "Mapping hardware decoded surface failed.\n");
+ goto error;
+ }
+ return true;
+ }
+
+ // Software decoding
+ assert(mpi->num_planes == p->plane_count);
+
+ timer_pool_start(p->upload_timer);
+ for (int n = 0; n < p->plane_count; n++) {
+ struct texplane *plane = &vimg->planes[n];
+ if (!plane->tex) {
+ timer_pool_stop(p->upload_timer);
+ goto error;
+ }
+
+ struct ra_tex_upload_params params = {
+ .tex = plane->tex,
+ .src = mpi->planes[n],
+ .invalidate = true,
+ .stride = mpi->stride[n],
+ };
+
+ plane->flipped = params.stride < 0;
+ if (plane->flipped) {
+ int h = mp_image_plane_h(mpi, n);
+ params.src = (char *)params.src + (h - 1) * params.stride;
+ params.stride = -params.stride;
+ }
+
+ struct dr_buffer *mapped = gl_find_dr_buffer(p, mpi->planes[n]);
+ if (mapped) {
+ params.buf = mapped->buf;
+ params.buf_offset = (uintptr_t)params.src -
+ (uintptr_t)mapped->buf->data;
+ params.src = NULL;
+ }
+
+ if (p->using_dr_path != !!mapped) {
+ p->using_dr_path = !!mapped;
+ MP_VERBOSE(p, "DR enabled: %s\n", p->using_dr_path ? "yes" : "no");
+ }
+
+ if (!p->ra->fns->tex_upload(p->ra, &params)) {
+ timer_pool_stop(p->upload_timer);
+ goto error;
+ }
+
+ if (mapped && !mapped->mpi)
+ mapped->mpi = mp_image_new_ref(mpi);
+ }
+ timer_pool_stop(p->upload_timer);
+
+ bool using_pbo = p->ra->use_pbo || !(p->ra->caps & RA_CAP_DIRECT_UPLOAD);
+ const char *mode = p->using_dr_path ? "DR" : using_pbo ? "PBO" : "naive";
+ pass_describe(p, "upload frame (%s)", mode);
+ pass_record(p, timer_pool_measure(p->upload_timer));
+
+ return true;
+
+error:
+ unref_current_image(p);
+ p->broken_frame = true;
+ return false;
+}
+
+static bool test_fbo(struct gl_video *p, const struct ra_format *fmt)
+{
+ MP_VERBOSE(p, "Testing FBO format %s\n", fmt->name);
+ struct ra_tex *tex = NULL;
+ bool success = ra_tex_resize(p->ra, p->log, &tex, 16, 16, fmt);
+ ra_tex_free(p->ra, &tex);
+ return success;
+}
+
+// Return whether dumb-mode can be used without disabling any features.
+// Essentially, vo_gpu with mostly default settings will return true.
+static bool check_dumb_mode(struct gl_video *p)
+{
+ struct gl_video_opts *o = &p->opts;
+ if (p->use_integer_conversion)
+ return false;
+ if (o->dumb_mode > 0) // requested by user
+ return true;
+ if (o->dumb_mode < 0) // disabled by user
+ return false;
+
+ // otherwise, use auto-detection
+ if (o->correct_downscaling || o->linear_downscaling ||
+ o->linear_upscaling || o->sigmoid_upscaling || o->interpolation ||
+ o->blend_subs || o->deband || o->unsharp)
+ return false;
+ // check remaining scalers (tscale is already implicitly excluded above)
+ for (int i = 0; i < SCALER_COUNT; i++) {
+ if (i != SCALER_TSCALE) {
+ const char *name = o->scaler[i].kernel.name;
+ if (name && strcmp(name, "bilinear") != 0)
+ return false;
+ }
+ }
+ if (o->user_shaders && o->user_shaders[0])
+ return false;
+ return true;
+}
+
+// Disable features that are not supported with the current OpenGL version.
+static void check_gl_features(struct gl_video *p)
+{
+ struct ra *ra = p->ra;
+ bool have_float_tex = !!ra_find_float16_format(ra, 1);
+ bool have_mglsl = ra->glsl_version >= 130; // modern GLSL
+ const struct ra_format *rg_tex = ra_find_unorm_format(p->ra, 1, 2);
+ bool have_texrg = rg_tex && !rg_tex->luminance_alpha;
+ bool have_compute = ra->caps & RA_CAP_COMPUTE;
+ bool have_ssbo = ra->caps & RA_CAP_BUF_RW;
+ bool have_fragcoord = ra->caps & RA_CAP_FRAGCOORD;
+
+ const char *auto_fbo_fmts[] = {"rgba16f", "rgba16hf", "rgba16",
+ "rgb10_a2", "rgba8", 0};
+ const char *user_fbo_fmts[] = {p->opts.fbo_format, 0};
+ const char **fbo_fmts = user_fbo_fmts[0] && strcmp(user_fbo_fmts[0], "auto")
+ ? user_fbo_fmts : auto_fbo_fmts;
+ bool user_specified_fbo_fmt = fbo_fmts == user_fbo_fmts;
+ bool fbo_test_result = false;
+ bool have_fbo = false;
+ p->fbo_format = NULL;
+ for (int n = 0; fbo_fmts[n]; n++) {
+ const char *fmt = fbo_fmts[n];
+ const struct ra_format *f = ra_find_named_format(p->ra, fmt);
+ if (!f && user_specified_fbo_fmt)
+ MP_WARN(p, "FBO format '%s' not found!\n", fmt);
+ if (f && f->renderable && f->linear_filter &&
+ (fbo_test_result = test_fbo(p, f))) {
+ MP_VERBOSE(p, "Using FBO format %s.\n", f->name);
+ have_fbo = true;
+ p->fbo_format = f;
+ break;
+ }
+
+ if (user_specified_fbo_fmt) {
+ MP_WARN(p, "User-specified FBO format '%s' failed to initialize! "
+ "(exists=%d, renderable=%d, linear_filter=%d, "
+ "fbo_test_result=%d)\n",
+ fmt, !!f, f ? f->renderable : 0, f ? f->linear_filter : 0,
+ fbo_test_result);
+ }
+ }
+
+ if (!have_fragcoord && p->opts.dither_depth >= 0 &&
+ p->opts.dither_algo != DITHER_NONE)
+ {
+ p->opts.dither_algo = DITHER_NONE;
+ MP_WARN(p, "Disabling dithering (no gl_FragCoord).\n");
+ }
+ if (!have_fragcoord && p->opts.alpha_mode == ALPHA_BLEND_TILES) {
+ p->opts.alpha_mode = ALPHA_BLEND;
+ // Verbose, since this is the default setting
+ MP_VERBOSE(p, "Disabling alpha checkerboard (no gl_FragCoord).\n");
+ }
+ if (!have_fbo && have_compute) {
+ have_compute = false;
+ MP_WARN(p, "Force-disabling compute shaders as an FBO format was not "
+ "available! See your FBO format configuration!\n");
+ }
+
+ if (have_compute && have_fbo && !p->fbo_format->storable) {
+ have_compute = false;
+ MP_WARN(p, "Force-disabling compute shaders as the chosen FBO format "
+ "is not storable! See your FBO format configuration!\n");
+ }
+
+ if (!have_compute && p->opts.dither_algo == DITHER_ERROR_DIFFUSION) {
+ MP_WARN(p, "Disabling error diffusion dithering because compute shader "
+ "was not supported. Fallback to dither=fruit instead.\n");
+ p->opts.dither_algo = DITHER_FRUIT;
+ }
+
+ bool have_compute_peak = have_compute && have_ssbo;
+ if (!have_compute_peak && p->opts.tone_map.compute_peak >= 0) {
+ int msgl = p->opts.tone_map.compute_peak == 1 ? MSGL_WARN : MSGL_V;
+ MP_MSG(p, msgl, "Disabling HDR peak computation (one or more of the "
+ "following is not supported: compute shaders=%d, "
+ "SSBO=%d).\n", have_compute, have_ssbo);
+ p->opts.tone_map.compute_peak = -1;
+ }
+
+ p->forced_dumb_mode = p->opts.dumb_mode > 0 || !have_fbo || !have_texrg;
+ bool voluntarily_dumb = check_dumb_mode(p);
+ if (p->forced_dumb_mode || voluntarily_dumb) {
+ if (voluntarily_dumb) {
+ MP_VERBOSE(p, "No advanced processing required. Enabling dumb mode.\n");
+ } else if (p->opts.dumb_mode <= 0) {
+ MP_WARN(p, "High bit depth FBOs unsupported. Enabling dumb mode.\n"
+ "Most extended features will be disabled.\n");
+ }
+ p->dumb_mode = true;
+ static const struct scaler_config dumb_scaler_config = {
+ {"bilinear", .params = {NAN, NAN}},
+ {.params = {NAN, NAN}},
+ };
+ // Most things don't work, so whitelist all options that still work.
+ p->opts = (struct gl_video_opts){
+ .scaler = {
+ [SCALER_SCALE] = dumb_scaler_config,
+ [SCALER_DSCALE] = dumb_scaler_config,
+ [SCALER_CSCALE] = dumb_scaler_config,
+ [SCALER_TSCALE] = dumb_scaler_config,
+ },
+ .gamma = p->opts.gamma,
+ .gamma_auto = p->opts.gamma_auto,
+ .pbo = p->opts.pbo,
+ .fbo_format = p->opts.fbo_format,
+ .alpha_mode = p->opts.alpha_mode,
+ .use_rectangle = p->opts.use_rectangle,
+ .background = p->opts.background,
+ .dither_algo = p->opts.dither_algo,
+ .dither_depth = p->opts.dither_depth,
+ .dither_size = p->opts.dither_size,
+ .error_diffusion = p->opts.error_diffusion,
+ .temporal_dither = p->opts.temporal_dither,
+ .temporal_dither_period = p->opts.temporal_dither_period,
+ .tex_pad_x = p->opts.tex_pad_x,
+ .tex_pad_y = p->opts.tex_pad_y,
+ .tone_map = p->opts.tone_map,
+ .early_flush = p->opts.early_flush,
+ .icc_opts = p->opts.icc_opts,
+ .hwdec_interop = p->opts.hwdec_interop,
+ .target_trc = p->opts.target_trc,
+ .target_prim = p->opts.target_prim,
+ .target_peak = p->opts.target_peak,
+ };
+ if (!have_fbo)
+ p->use_lut_3d = false;
+ return;
+ }
+ p->dumb_mode = false;
+
+ // Normally, we want to disable the convolution scalers by default if FBOs
+ // are unavailable, because they will be slow (not critically slow, but
+ // still slower). Without FP textures, we must always disable them.
+ // I don't know if luminance alpha float textures exist, so disregard them.
+ for (int n = 0; n < SCALER_COUNT; n++) {
+ const struct filter_kernel *kernel =
+ mp_find_filter_kernel(p->opts.scaler[n].kernel.name);
+ if (kernel) {
+ char *reason = NULL;
+ if (!have_float_tex)
+ reason = "(float tex. missing)";
+ if (!have_mglsl)
+ reason = "(GLSL version too old)";
+ if (reason) {
+ MP_WARN(p, "Disabling scaler #%d %s %s.\n", n,
+ p->opts.scaler[n].kernel.name, reason);
+ // p->opts is a copy => we can just mess with it.
+ p->opts.scaler[n].kernel.name = "bilinear";
+ if (n == SCALER_TSCALE)
+ p->opts.interpolation = false;
+ }
+ }
+ }
+
+ int use_cms = p->opts.target_prim != MP_CSP_PRIM_AUTO ||
+ p->opts.target_trc != MP_CSP_TRC_AUTO || p->use_lut_3d;
+
+ // mix() is needed for some gamma functions
+ if (!have_mglsl && (p->opts.linear_downscaling ||
+ p->opts.linear_upscaling || p->opts.sigmoid_upscaling))
+ {
+ p->opts.linear_downscaling = false;
+ p->opts.linear_upscaling = false;
+ p->opts.sigmoid_upscaling = false;
+ MP_WARN(p, "Disabling linear/sigmoid scaling (GLSL version too old).\n");
+ }
+ if (!have_mglsl && use_cms) {
+ p->opts.target_prim = MP_CSP_PRIM_AUTO;
+ p->opts.target_trc = MP_CSP_TRC_AUTO;
+ p->use_lut_3d = false;
+ MP_WARN(p, "Disabling color management (GLSL version too old).\n");
+ }
+ if (!have_mglsl && p->opts.deband) {
+ p->opts.deband = false;
+ MP_WARN(p, "Disabling debanding (GLSL version too old).\n");
+ }
+}
+
+static void init_gl(struct gl_video *p)
+{
+ debug_check_gl(p, "before init_gl");
+
+ p->upload_timer = timer_pool_create(p->ra);
+ p->blit_timer = timer_pool_create(p->ra);
+ p->osd_timer = timer_pool_create(p->ra);
+
+ debug_check_gl(p, "after init_gl");
+
+ ra_dump_tex_formats(p->ra, MSGL_DEBUG);
+ ra_dump_img_formats(p->ra, MSGL_DEBUG);
+}
+
+void gl_video_uninit(struct gl_video *p)
+{
+ if (!p)
+ return;
+
+ uninit_video(p);
+ ra_hwdec_ctx_uninit(&p->hwdec_ctx);
+ gl_sc_destroy(p->sc);
+
+ ra_tex_free(p->ra, &p->lut_3d_texture);
+ ra_buf_free(p->ra, &p->hdr_peak_ssbo);
+
+ timer_pool_destroy(p->upload_timer);
+ timer_pool_destroy(p->blit_timer);
+ timer_pool_destroy(p->osd_timer);
+
+ for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
+ talloc_free(p->pass_fresh[i].desc.start);
+ talloc_free(p->pass_redraw[i].desc.start);
+ }
+
+ mpgl_osd_destroy(p->osd);
+
+ // Forcibly destroy possibly remaining image references. This should also
+ // cause gl_video_dr_free_buffer() to be called for the remaining buffers.
+ gc_pending_dr_fences(p, true);
+
+ // Should all have been unreffed already.
+ assert(!p->num_dr_buffers);
+
+ talloc_free(p);
+}
+
+void gl_video_reset(struct gl_video *p)
+{
+ gl_video_reset_surfaces(p);
+}
+
+bool gl_video_showing_interpolated_frame(struct gl_video *p)
+{
+ return p->is_interpolated;
+}
+
+static bool is_imgfmt_desc_supported(struct gl_video *p,
+ const struct ra_imgfmt_desc *desc)
+{
+ if (!desc->num_planes)
+ return false;
+
+ if (desc->planes[0]->ctype == RA_CTYPE_UINT && p->forced_dumb_mode)
+ return false;
+
+ return true;
+}
+
+bool gl_video_check_format(struct gl_video *p, int mp_format)
+{
+ struct ra_imgfmt_desc desc;
+ if (ra_get_imgfmt_desc(p->ra, mp_format, &desc) &&
+ is_imgfmt_desc_supported(p, &desc))
+ return true;
+ if (ra_hwdec_get(&p->hwdec_ctx, mp_format))
+ return true;
+ return false;
+}
+
+void gl_video_config(struct gl_video *p, struct mp_image_params *params)
+{
+ unmap_overlay(p);
+ unref_current_image(p);
+
+ if (!mp_image_params_equal(&p->real_image_params, params)) {
+ uninit_video(p);
+ p->real_image_params = *params;
+ p->image_params = *params;
+ if (params->imgfmt)
+ init_video(p);
+ }
+
+ gl_video_reset_surfaces(p);
+}
+
+void gl_video_set_osd_source(struct gl_video *p, struct osd_state *osd)
+{
+ mpgl_osd_destroy(p->osd);
+ p->osd = NULL;
+ p->osd_state = osd;
+ reinit_osd(p);
+}
+
+struct gl_video *gl_video_init(struct ra *ra, struct mp_log *log,
+ struct mpv_global *g)
+{
+ struct gl_video *p = talloc_ptrtype(NULL, p);
+ *p = (struct gl_video) {
+ .ra = ra,
+ .global = g,
+ .log = log,
+ .sc = gl_sc_create(ra, g, log),
+ .video_eq = mp_csp_equalizer_create(p, g),
+ .opts_cache = m_config_cache_alloc(p, g, &gl_video_conf),
+ };
+ // make sure this variable is initialized to *something*
+ p->pass = p->pass_fresh;
+ struct gl_video_opts *opts = p->opts_cache->opts;
+    p->cms = gl_lcms_init(p, log, g, opts->icc_opts);
+ p->opts = *opts;
+ for (int n = 0; n < SCALER_COUNT; n++)
+ p->scaler[n] = (struct scaler){.index = n};
+ // our VAO always has the vec2 position as the first element
+ MP_TARRAY_APPEND(p, p->vao, p->vao_len, (struct ra_renderpass_input) {
+ .name = "position",
+ .type = RA_VARTYPE_FLOAT,
+ .dim_v = 2,
+ .dim_m = 1,
+ .offset = 0,
+ });
+ init_gl(p);
+ reinit_from_options(p);
+ return p;
+}
+
+// Get static string for scaler shader. If "tscale" is set to true, the
+// scaler must be a separable convolution filter.
+static const char *handle_scaler_opt(const char *name, bool tscale)
+{
+ if (name && name[0]) {
+ const struct filter_kernel *kernel = mp_find_filter_kernel(name);
+ if (kernel && (!tscale || !kernel->polar))
+ return kernel->f.name;
+
+ const struct filter_window *window = mp_find_filter_window(name);
+ if (window)
+ return window->name;
+
+ for (const char *const *filter = tscale ? fixed_tscale_filters
+ : fixed_scale_filters;
+ *filter; filter++) {
+ if (strcmp(*filter, name) == 0)
+ return *filter;
+ }
+ }
+ return NULL;
+}
+
+static void gl_video_update_options(struct gl_video *p)
+{
+ if (m_config_cache_update(p->opts_cache)) {
+ gl_lcms_update_options(p->cms);
+ reinit_from_options(p);
+ }
+
+ if (mp_csp_equalizer_state_changed(p->video_eq))
+ p->output_tex_valid = false;
+}
+
+static void reinit_from_options(struct gl_video *p)
+{
+ p->use_lut_3d = gl_lcms_has_profile(p->cms);
+
+ // Copy the option fields, so that check_gl_features() can mutate them.
+ // This works only for the fields themselves of course, not for any memory
+ // referenced by them.
+ p->opts = *(struct gl_video_opts *)p->opts_cache->opts;
+
+ if (!p->force_clear_color)
+ p->clear_color = p->opts.background;
+
+ check_gl_features(p);
+ uninit_rendering(p);
+ if (p->opts.shader_cache)
+ gl_sc_set_cache_dir(p->sc, p->opts.shader_cache_dir);
+ p->ra->use_pbo = p->opts.pbo;
+ gl_video_setup_hooks(p);
+ reinit_osd(p);
+
+ struct mp_vo_opts *vo_opts = mp_get_config_group(p, p->global, &vo_sub_opts);
+ if (p->opts.interpolation && !vo_opts->video_sync && !p->dsi_warned) {
+ MP_WARN(p, "Interpolation now requires enabling display-sync mode.\n"
+ "E.g.: --video-sync=display-resample\n");
+ p->dsi_warned = true;
+ }
+ talloc_free(vo_opts);
+
+ if (p->opts.correct_downscaling && !p->correct_downscaling_warned) {
+ const char *name = p->opts.scaler[SCALER_DSCALE].kernel.name;
+ if (!name)
+ name = p->opts.scaler[SCALER_SCALE].kernel.name;
+ if (!name || !strcmp(name, "bilinear")) {
+ MP_WARN(p, "correct-downscaling requires non-bilinear scaler.\n");
+ p->correct_downscaling_warned = true;
+ }
+ }
+}
+
+void gl_video_configure_queue(struct gl_video *p, struct vo *vo)
+{
+ gl_video_update_options(p);
+
+ int queue_size = 1;
+
+ // Figure out an adequate size for the interpolation queue. The larger
+ // the radius, the earlier we need to queue frames.
+ if (p->opts.interpolation) {
+ const struct filter_kernel *kernel =
+ mp_find_filter_kernel(p->opts.scaler[SCALER_TSCALE].kernel.name);
+ if (kernel) {
+ // filter_scale wouldn't be correctly initialized were we to use it here.
+ // This is fine since we're always upsampling, but beware if downsampling
+ // is added!
+ double radius = kernel->f.radius;
+ radius = radius > 0 ? radius : p->opts.scaler[SCALER_TSCALE].radius;
+ queue_size += 1 + ceil(radius);
+ } else {
+ // Oversample/linear case
+ queue_size += 2;
+ }
+ }
+
+ vo_set_queue_params(vo, 0, queue_size);
+}
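+
+// [Editor's note] Illustrative sketch, not part of mpv: the queue sizing rule
+// from gl_video_configure_queue() above as a standalone (hypothetical) helper.
+// A radius <= 0 stands for the oversample/linear (kernel-less) case.
+static int interpolation_queue_size(double tscale_radius)
+{
+    if (tscale_radius > 0)
+        return 1 + 1 + (int)ceil(tscale_radius); // base frame + lookahead
+    return 1 + 2; // oversample/linear case
+}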
+
+static int validate_scaler_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ char s[32] = {0};
+ int r = 1;
+ bool tscale = bstr_equals0(name, "tscale");
+ if (bstr_equals0(param, "help")) {
+ r = M_OPT_EXIT;
+ } else if (bstr_equals0(name, "dscale") && !param.len) {
+ return r; // empty dscale means "use same as upscaler"
+ } else if (bstr_equals0(name, "cscale") && !param.len) {
+ return r; // empty cscale means "use same as upscaler"
+ } else {
+ snprintf(s, sizeof(s), "%.*s", BSTR_P(param));
+ if (!handle_scaler_opt(s, tscale))
+ r = M_OPT_INVALID;
+ }
+ if (r < 1) {
+ mp_info(log, "Available scalers:\n");
+ for (const char *const *filter = tscale ? fixed_tscale_filters
+ : fixed_scale_filters;
+ *filter; filter++) {
+ mp_info(log, " %s\n", *filter);
+ }
+ for (int n = 0; mp_filter_kernels[n].f.name; n++) {
+ if (!tscale || !mp_filter_kernels[n].polar)
+ mp_info(log, " %s\n", mp_filter_kernels[n].f.name);
+ }
+ for (int n = 0; mp_filter_windows[n].name; n++) {
+ for (int m = 0; mp_filter_kernels[m].f.name; m++) {
+ if (!strcmp(mp_filter_windows[n].name, mp_filter_kernels[m].f.name))
+ goto next_window; // don't log duplicates
+ }
+ mp_info(log, " %s\n", mp_filter_windows[n].name);
+next_window: ;
+ }
+ if (s[0])
+ mp_fatal(log, "No scaler named '%s' found!\n", s);
+ }
+ return r;
+}
+
+static int validate_window_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ char s[32] = {0};
+ int r = 1;
+ if (bstr_equals0(param, "help")) {
+ r = M_OPT_EXIT;
+ } else if (!param.len) {
+ return r; // empty string means "use preferred window"
+ } else {
+ snprintf(s, sizeof(s), "%.*s", BSTR_P(param));
+ const struct filter_window *window = mp_find_filter_window(s);
+ if (!window)
+ r = M_OPT_INVALID;
+ }
+ if (r < 1) {
+ mp_info(log, "Available windows:\n");
+ for (int n = 0; mp_filter_windows[n].name; n++)
+ mp_info(log, " %s\n", mp_filter_windows[n].name);
+ if (s[0])
+ mp_fatal(log, "No window named '%s' found!\n", s);
+ }
+ return r;
+}
+
+static int validate_error_diffusion_opt(struct mp_log *log, const m_option_t *opt,
+ struct bstr name, const char **value)
+{
+ struct bstr param = bstr0(*value);
+ char s[32] = {0};
+ int r = 1;
+ if (bstr_equals0(param, "help")) {
+ r = M_OPT_EXIT;
+ } else {
+ snprintf(s, sizeof(s), "%.*s", BSTR_P(param));
+ const struct error_diffusion_kernel *k = mp_find_error_diffusion_kernel(s);
+ if (!k)
+ r = M_OPT_INVALID;
+ }
+ if (r < 1) {
+ mp_info(log, "Available error diffusion kernels:\n");
+ for (int n = 0; mp_error_diffusion_kernels[n].name; n++)
+ mp_info(log, " %s\n", mp_error_diffusion_kernels[n].name);
+ if (s[0])
+ mp_fatal(log, "No error diffusion kernel named '%s' found!\n", s);
+ }
+ return r;
+}
+
+void gl_video_set_ambient_lux(struct gl_video *p, int lux)
+{
+ if (p->opts.gamma_auto) {
+ p->opts.gamma = gl_video_scale_ambient_lux(16.0, 256.0, 1.0, 1.2, lux);
+ MP_TRACE(p, "ambient light changed: %d lux (gamma: %f)\n", lux,
+ p->opts.gamma);
+ }
+}
+
+static void *gl_video_dr_alloc_buffer(struct gl_video *p, size_t size)
+{
+ struct ra_buf_params params = {
+ .type = RA_BUF_TYPE_TEX_UPLOAD,
+ .host_mapped = true,
+ .size = size,
+ };
+
+ struct ra_buf *buf = ra_buf_create(p->ra, &params);
+ if (!buf)
+ return NULL;
+
+ MP_TARRAY_GROW(p, p->dr_buffers, p->num_dr_buffers);
+ p->dr_buffers[p->num_dr_buffers++] = (struct dr_buffer){ .buf = buf };
+
+ return buf->data;
+}
+
+static void gl_video_dr_free_buffer(void *opaque, uint8_t *data)
+{
+ struct gl_video *p = opaque;
+
+ for (int n = 0; n < p->num_dr_buffers; n++) {
+ struct dr_buffer *buffer = &p->dr_buffers[n];
+ if (buffer->buf->data == data) {
+ assert(!buffer->mpi); // can't be freed while it has a ref
+ ra_buf_free(p->ra, &buffer->buf);
+ MP_TARRAY_REMOVE_AT(p->dr_buffers, p->num_dr_buffers, n);
+ return;
+ }
+ }
+ // not found - must not happen
+ MP_ASSERT_UNREACHABLE();
+}
+
+struct mp_image *gl_video_get_image(struct gl_video *p, int imgfmt, int w, int h,
+ int stride_align, int flags)
+{
+ if (flags & VO_DR_FLAG_HOST_CACHED) {
+ if (p->ra->caps & RA_CAP_SLOW_DR) {
+ MP_VERBOSE(p, "DR path suspected slow/uncached, disabling.\n");
+ return NULL;
+ }
+ }
+
+ if (!gl_video_check_format(p, imgfmt))
+ return NULL;
+
+ int size = mp_image_get_alloc_size(imgfmt, w, h, stride_align);
+ if (size < 0)
+ return NULL;
+
+ int alloc_size = size + stride_align;
+ void *ptr = gl_video_dr_alloc_buffer(p, alloc_size);
+ if (!ptr)
+ return NULL;
+
+ // (we expect vo.c to proxy the free callback, so it happens in the same
+ // thread it was allocated in, removing the need for synchronization)
+ struct mp_image *res = mp_image_from_buffer(imgfmt, w, h, stride_align,
+ ptr, alloc_size, p,
+ gl_video_dr_free_buffer);
+ if (!res)
+ gl_video_dr_free_buffer(p, ptr);
+ return res;
+}
+
+void gl_video_init_hwdecs(struct gl_video *p, struct ra_ctx *ra_ctx,
+ struct mp_hwdec_devices *devs,
+ bool load_all_by_default)
+{
+ assert(!p->hwdec_ctx.ra_ctx);
+ p->hwdec_ctx = (struct ra_hwdec_ctx) {
+ .log = p->log,
+ .global = p->global,
+ .ra_ctx = ra_ctx,
+ };
+
+ ra_hwdec_ctx_init(&p->hwdec_ctx, devs, p->opts.hwdec_interop, load_all_by_default);
+}
+
+void gl_video_load_hwdecs_for_img_fmt(struct gl_video *p, struct mp_hwdec_devices *devs,
+ struct hwdec_imgfmt_request *params)
+{
+ assert(p->hwdec_ctx.ra_ctx);
+ ra_hwdec_ctx_load_fmt(&p->hwdec_ctx, devs, params);
+}
diff --git a/video/out/gpu/video.h b/video/out/gpu/video.h
new file mode 100644
index 0000000..411d336
--- /dev/null
+++ b/video/out/gpu/video.h
@@ -0,0 +1,238 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MP_GL_VIDEO_H
+#define MP_GL_VIDEO_H
+
+#include <stdbool.h>
+
+#include "options/m_option.h"
+#include "sub/osd.h"
+#include "utils.h"
+#include "lcms.h"
+#include "shader_cache.h"
+#include "video/csputils.h"
+#include "video/out/filter_kernels.h"
+
+struct scaler_fun {
+ char *name;
+ float params[2];
+ float blur;
+ float taper;
+};
+
+struct scaler_config {
+ struct scaler_fun kernel;
+ struct scaler_fun window;
+ float radius;
+ float antiring;
+ float clamp;
+};
+
+struct scaler {
+ int index;
+ struct scaler_config conf;
+ double scale_factor;
+ bool initialized;
+ struct filter_kernel *kernel;
+ struct ra_tex *lut;
+ struct ra_tex *sep_fbo;
+ bool insufficient;
+
+ // kernel points here
+ struct filter_kernel kernel_storage;
+};
+
+enum scaler_unit {
+ SCALER_SCALE, // luma/video
+ SCALER_DSCALE, // luma-video downscaling
+ SCALER_CSCALE, // chroma upscaling
+ SCALER_TSCALE, // temporal scaling (interpolation)
+ SCALER_COUNT
+};
+
+enum dither_algo {
+ DITHER_NONE = 0,
+ DITHER_FRUIT,
+ DITHER_ORDERED,
+ DITHER_ERROR_DIFFUSION,
+};
+
+enum alpha_mode {
+ ALPHA_NO = 0,
+ ALPHA_YES,
+ ALPHA_BLEND,
+ ALPHA_BLEND_TILES,
+};
+
+enum blend_subs_mode {
+ BLEND_SUBS_NO = 0,
+ BLEND_SUBS_YES,
+ BLEND_SUBS_VIDEO,
+};
+
+enum tone_mapping {
+ TONE_MAPPING_AUTO,
+ TONE_MAPPING_CLIP,
+ TONE_MAPPING_MOBIUS,
+ TONE_MAPPING_REINHARD,
+ TONE_MAPPING_HABLE,
+ TONE_MAPPING_GAMMA,
+ TONE_MAPPING_LINEAR,
+ TONE_MAPPING_SPLINE,
+ TONE_MAPPING_BT_2390,
+ TONE_MAPPING_BT_2446A,
+ TONE_MAPPING_ST2094_40,
+ TONE_MAPPING_ST2094_10,
+};
+
+enum gamut_mode {
+ GAMUT_AUTO,
+ GAMUT_CLIP,
+ GAMUT_PERCEPTUAL,
+ GAMUT_RELATIVE,
+ GAMUT_SATURATION,
+ GAMUT_ABSOLUTE,
+ GAMUT_DESATURATE,
+ GAMUT_DARKEN,
+ GAMUT_WARN,
+ GAMUT_LINEAR,
+};
+
+struct gl_tone_map_opts {
+ int curve;
+ float curve_param;
+ float max_boost;
+ bool inverse;
+ int compute_peak;
+ float decay_rate;
+ float scene_threshold_low;
+ float scene_threshold_high;
+ float peak_percentile;
+ float contrast_recovery;
+ float contrast_smoothness;
+ int gamut_mode;
+ bool visualize;
+};
+
+struct gl_video_opts {
+ int dumb_mode;
+ struct scaler_config scaler[4];
+ float gamma;
+ bool gamma_auto;
+ int target_prim;
+ int target_trc;
+ int target_peak;
+ int target_contrast;
+ int target_gamut;
+ struct gl_tone_map_opts tone_map;
+ bool correct_downscaling;
+ bool linear_downscaling;
+ bool linear_upscaling;
+ bool sigmoid_upscaling;
+ float sigmoid_center;
+ float sigmoid_slope;
+ bool scaler_resizes_only;
+ bool pbo;
+ int dither_depth;
+ int dither_algo;
+ int dither_size;
+ bool temporal_dither;
+ int temporal_dither_period;
+ char *error_diffusion;
+ char *fbo_format;
+ int alpha_mode;
+ bool use_rectangle;
+ struct m_color background;
+ bool interpolation;
+ float interpolation_threshold;
+ int blend_subs;
+ char **user_shaders;
+ char **user_shader_opts;
+ bool deband;
+ struct deband_opts *deband_opts;
+ float unsharp;
+ int tex_pad_x, tex_pad_y;
+ struct mp_icc_opts *icc_opts;
+ bool shader_cache;
+ int early_flush;
+ char *shader_cache_dir;
+ char *hwdec_interop;
+};
+
+extern const struct m_sub_options gl_video_conf;
+
+struct gl_video;
+struct vo_frame;
+struct voctrl_screenshot;
+
+enum {
+ RENDER_FRAME_SUBS = 1 << 0,
+ RENDER_FRAME_OSD = 1 << 1,
+ RENDER_FRAME_VF_SUBS = 1 << 2,
+ RENDER_SCREEN_COLOR = 1 << 3, // 3D LUT and dithering
+ RENDER_FRAME_DEF = RENDER_FRAME_SUBS | RENDER_FRAME_OSD | RENDER_SCREEN_COLOR,
+};
+
+struct gl_video *gl_video_init(struct ra *ra, struct mp_log *log,
+ struct mpv_global *g);
+void gl_video_uninit(struct gl_video *p);
+void gl_video_set_osd_source(struct gl_video *p, struct osd_state *osd);
+bool gl_video_check_format(struct gl_video *p, int mp_format);
+void gl_video_config(struct gl_video *p, struct mp_image_params *params);
+void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame,
+ struct ra_fbo fbo, int flags);
+void gl_video_resize(struct gl_video *p,
+ struct mp_rect *src, struct mp_rect *dst,
+ struct mp_osd_res *osd);
+void gl_video_set_fb_depth(struct gl_video *p, int fb_depth);
+void gl_video_perfdata(struct gl_video *p, struct voctrl_performance_data *out);
+void gl_video_set_clear_color(struct gl_video *p, struct m_color color);
+void gl_video_set_osd_pts(struct gl_video *p, double pts);
+bool gl_video_check_osd_change(struct gl_video *p, struct mp_osd_res *osd,
+ double pts);
+
+void gl_video_screenshot(struct gl_video *p, struct vo_frame *frame,
+ struct voctrl_screenshot *args);
+
+float gl_video_scale_ambient_lux(float lmin, float lmax,
+ float rmin, float rmax, float lux);
+void gl_video_set_ambient_lux(struct gl_video *p, int lux);
+void gl_video_set_icc_profile(struct gl_video *p, bstr icc_data);
+bool gl_video_icc_auto_enabled(struct gl_video *p);
+bool gl_video_gamma_auto_enabled(struct gl_video *p);
+struct mp_colorspace gl_video_get_output_colorspace(struct gl_video *p);
+
+void gl_video_reset(struct gl_video *p);
+bool gl_video_showing_interpolated_frame(struct gl_video *p);
+
+struct mp_hwdec_devices;
+void gl_video_init_hwdecs(struct gl_video *p, struct ra_ctx *ra_ctx,
+ struct mp_hwdec_devices *devs,
+ bool load_all_by_default);
+struct hwdec_imgfmt_request;
+void gl_video_load_hwdecs_for_img_fmt(struct gl_video *p, struct mp_hwdec_devices *devs,
+ struct hwdec_imgfmt_request *params);
+
+struct vo;
+void gl_video_configure_queue(struct gl_video *p, struct vo *vo);
+
+struct mp_image *gl_video_get_image(struct gl_video *p, int imgfmt, int w, int h,
+ int stride_align, int flags);
+
+
+#endif
diff --git a/video/out/gpu/video_shaders.c b/video/out/gpu/video_shaders.c
new file mode 100644
index 0000000..6c0e8a8
--- /dev/null
+++ b/video/out/gpu/video_shaders.c
@@ -0,0 +1,1033 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <math.h>
+
+#include "video_shaders.h"
+#include "video.h"
+
+#define GLSL(x) gl_sc_add(sc, #x "\n");
+#define GLSLF(...) gl_sc_addf(sc, __VA_ARGS__)
+#define GLSLH(x) gl_sc_hadd(sc, #x "\n");
+#define GLSLHF(...) gl_sc_haddf(sc, __VA_ARGS__)
+
+// Set up shared/commonly used variables and macros
+void sampler_prelude(struct gl_shader_cache *sc, int tex_num)
+{
+ GLSLF("#undef tex\n");
+ GLSLF("#undef texmap\n");
+ GLSLF("#define tex texture%d\n", tex_num);
+ GLSLF("#define texmap texmap%d\n", tex_num);
+ GLSLF("vec2 pos = texcoord%d;\n", tex_num);
+ GLSLF("vec2 size = texture_size%d;\n", tex_num);
+ GLSLF("vec2 pt = pixel_size%d;\n", tex_num);
+}
+
+static void pass_sample_separated_get_weights(struct gl_shader_cache *sc,
+ struct scaler *scaler)
+{
+ gl_sc_uniform_texture(sc, "lut", scaler->lut);
+ GLSLF("float ypos = LUT_POS(fcoord, %d.0);\n", scaler->lut->params.h);
+
+ int N = scaler->kernel->size;
+ int width = (N + 3) / 4; // round up
+
+ GLSLF("float weights[%d];\n", N);
+ for (int i = 0; i < N; i++) {
+ if (i % 4 == 0)
+ GLSLF("c = texture(lut, vec2(%f, ypos));\n", (i / 4 + 0.5) / width);
+ GLSLF("weights[%d] = c[%d];\n", i, i % 4);
+ }
+}
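+
+// [Editor's note] Illustrative sketch, not part of mpv: how the loop above
+// addresses the weight LUT, assuming N weights packed four per texel along a
+// row of ceil(N/4) texels. The helper name is hypothetical.
+static void weight_lut_coord(int i, int n_weights, float *out_x, int *out_comp)
+{
+    int width = (n_weights + 3) / 4;  // texels per LUT row, rounded up
+    *out_x = (i / 4 + 0.5f) / width;  // sample at the texel center
+    *out_comp = i % 4;                // r/g/b/a component within that texel
+}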
+
+// Handle a single pass (either vertical or horizontal). The direction is given
+// by the vector (d_x, d_y). If the vector is 0, then planar interpolation is
+// used instead (samples from texture0 through textureN)
+void pass_sample_separated_gen(struct gl_shader_cache *sc, struct scaler *scaler,
+ int d_x, int d_y)
+{
+ int N = scaler->kernel->size;
+ bool use_ar = scaler->conf.antiring > 0;
+ bool planar = d_x == 0 && d_y == 0;
+ GLSL(color = vec4(0.0);)
+ GLSLF("{\n");
+ if (!planar) {
+ GLSLF("vec2 dir = vec2(%d.0, %d.0);\n", d_x, d_y);
+ GLSL(pt *= dir;)
+ GLSL(float fcoord = dot(fract(pos * size - vec2(0.5)), dir);)
+ GLSLF("vec2 base = pos - fcoord * pt - pt * vec2(%d.0);\n", N / 2 - 1);
+ }
+ GLSL(vec4 c;)
+ if (use_ar) {
+ GLSL(vec4 hi = vec4(0.0);)
+ GLSL(vec4 lo = vec4(1.0);)
+ }
+ pass_sample_separated_get_weights(sc, scaler);
+ GLSLF("// scaler samples\n");
+ for (int n = 0; n < N; n++) {
+ if (planar) {
+ GLSLF("c = texture(texture%d, texcoord%d);\n", n, n);
+ } else {
+ GLSLF("c = texture(tex, base + pt * vec2(%d.0));\n", n);
+ }
+ GLSLF("color += vec4(weights[%d]) * c;\n", n);
+ if (use_ar && (n == N/2-1 || n == N/2)) {
+ GLSL(lo = min(lo, c);)
+ GLSL(hi = max(hi, c);)
+ }
+ }
+ if (use_ar)
+ GLSLF("color = mix(color, clamp(color, lo, hi), %f);\n",
+ scaler->conf.antiring);
+ GLSLF("}\n");
+}
+
+// Subroutine for computing and adding an individual texel contribution
+// If planar is false, samples directly
+// If planar is true, takes the pixel from inX[idx] where X is the component and
+// `idx` must be defined by the caller
+static void polar_sample(struct gl_shader_cache *sc, struct scaler *scaler,
+ int x, int y, int components, bool planar)
+{
+ double radius = scaler->kernel->radius * scaler->kernel->filter_scale;
+ double radius_cutoff = scaler->kernel->radius_cutoff;
+
+ // Since we can't know the subpixel position in advance, assume a
+ // worst case scenario
+ int yy = y > 0 ? y-1 : y;
+ int xx = x > 0 ? x-1 : x;
+ double dmax = sqrt(xx*xx + yy*yy);
+ // Skip samples definitely outside the radius
+ if (dmax >= radius_cutoff)
+ return;
+ GLSLF("d = length(vec2(%d.0, %d.0) - fcoord);\n", x, y);
+ // Check for samples that might be skippable
+ bool maybe_skippable = dmax >= radius_cutoff - M_SQRT2;
+ if (maybe_skippable)
+ GLSLF("if (d < %f) {\n", radius_cutoff);
+
+ // get the weight for this pixel
+ if (scaler->lut->params.dimensions == 1) {
+ GLSLF("w = tex1D(lut, LUT_POS(d * 1.0/%f, %d.0)).r;\n",
+ radius, scaler->lut->params.w);
+ } else {
+ GLSLF("w = texture(lut, vec2(0.5, LUT_POS(d * 1.0/%f, %d.0))).r;\n",
+ radius, scaler->lut->params.h);
+ }
+ GLSL(wsum += w;)
+
+ if (planar) {
+ for (int n = 0; n < components; n++)
+ GLSLF("color[%d] += w * in%d[idx];\n", n, n);
+ } else {
+ GLSLF("in0 = texture(tex, base + pt * vec2(%d.0, %d.0));\n", x, y);
+ GLSL(color += vec4(w) * in0;)
+ }
+
+ if (maybe_skippable)
+ GLSLF("}\n");
+}
+
+void pass_sample_polar(struct gl_shader_cache *sc, struct scaler *scaler,
+ int components, bool sup_gather)
+{
+ GLSL(color = vec4(0.0);)
+ GLSLF("{\n");
+ GLSL(vec2 fcoord = fract(pos * size - vec2(0.5));)
+ GLSL(vec2 base = pos - fcoord * pt;)
+ GLSLF("float w, d, wsum = 0.0;\n");
+ for (int n = 0; n < components; n++)
+ GLSLF("vec4 in%d;\n", n);
+ GLSL(int idx;)
+
+ gl_sc_uniform_texture(sc, "lut", scaler->lut);
+
+ GLSLF("// scaler samples\n");
+ int bound = ceil(scaler->kernel->radius_cutoff);
+ for (int y = 1-bound; y <= bound; y += 2) {
+ for (int x = 1-bound; x <= bound; x += 2) {
+ // First we figure out whether it's more efficient to use direct
+ // sampling or gathering. The problem is that gathering 4 texels
+ // only to discard some of them is very wasteful, so only do it if
+ // we suspect it will be a win rather than a loss. This is the case
+ // exactly when all four texels are within bounds
+ bool use_gather = sqrt(x*x + y*y) < scaler->kernel->radius_cutoff;
+
+ if (!sup_gather)
+ use_gather = false;
+
+ if (use_gather) {
+ // Gather the four surrounding texels simultaneously
+ for (int n = 0; n < components; n++) {
+ GLSLF("in%d = textureGatherOffset(tex, base, "
+ "ivec2(%d, %d), %d);\n", n, x, y, n);
+ }
+
+ // Mix in all of the points with their weights
+ for (int p = 0; p < 4; p++) {
+ // The four texels are gathered counterclockwise starting
+ // from the bottom left
+ static const int xo[4] = {0, 1, 1, 0};
+ static const int yo[4] = {1, 1, 0, 0};
+ if (x+xo[p] > bound || y+yo[p] > bound)
+ continue;
+ GLSLF("idx = %d;\n", p);
+ polar_sample(sc, scaler, x+xo[p], y+yo[p], components, true);
+ }
+ } else {
+ // switch to direct sampling instead, for efficiency/compatibility
+ for (int yy = y; yy <= bound && yy <= y+1; yy++) {
+ for (int xx = x; xx <= bound && xx <= x+1; xx++)
+ polar_sample(sc, scaler, xx, yy, components, false);
+ }
+ }
+ }
+ }
+
+ GLSL(color = color / vec4(wsum);)
+ GLSLF("}\n");
+}
+
+// bw/bh: block size
+// iw/ih: input size (pre-calculated to fit all required texels)
+void pass_compute_polar(struct gl_shader_cache *sc, struct scaler *scaler,
+ int components, int bw, int bh, int iw, int ih)
+{
+ int bound = ceil(scaler->kernel->radius_cutoff);
+ int offset = bound - 1; // padding top/left
+
+ GLSL(color = vec4(0.0);)
+ GLSLF("{\n");
+ GLSL(vec2 wpos = texmap(gl_WorkGroupID * gl_WorkGroupSize);)
+ GLSL(vec2 wbase = wpos - pt * fract(wpos * size - vec2(0.5));)
+ GLSL(vec2 fcoord = fract(pos * size - vec2(0.5));)
+ GLSL(vec2 base = pos - pt * fcoord;)
+ GLSL(ivec2 rel = ivec2(round((base - wbase) * size));)
+ GLSL(int idx;)
+ GLSLF("float w, d, wsum = 0.0;\n");
+ gl_sc_uniform_texture(sc, "lut", scaler->lut);
+
+ // Load all relevant texels into shmem
+ for (int c = 0; c < components; c++)
+ GLSLHF("shared float in%d[%d];\n", c, ih * iw);
+
+ GLSL(vec4 c;)
+ GLSLF("for (int y = int(gl_LocalInvocationID.y); y < %d; y += %d) {\n", ih, bh);
+ GLSLF("for (int x = int(gl_LocalInvocationID.x); x < %d; x += %d) {\n", iw, bw);
+ GLSLF("c = texture(tex, wbase + pt * vec2(x - %d, y - %d));\n", offset, offset);
+ for (int c = 0; c < components; c++)
+ GLSLF("in%d[%d * y + x] = c[%d];\n", c, iw, c);
+ GLSLF("}}\n");
+ GLSL(groupMemoryBarrier();)
+ GLSL(barrier();)
+
+ // Dispatch the actual samples
+ GLSLF("// scaler samples\n");
+ for (int y = 1-bound; y <= bound; y++) {
+ for (int x = 1-bound; x <= bound; x++) {
+ GLSLF("idx = %d * rel.y + rel.x + %d;\n", iw,
+ iw * (y + offset) + x + offset);
+ polar_sample(sc, scaler, x, y, components, true);
+ }
+ }
+
+ GLSL(color = color / vec4(wsum);)
+ GLSLF("}\n");
+}
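+
+// [Editor's note] Illustrative sketch, not part of mpv: the shared-memory
+// index emitted per filter tap by the loop above, written out explicitly.
+// (rel_x, rel_y) is the invocation's texel offset within the block, (x, y)
+// the tap offset, 'iw' the padded input width and 'offset' equals bound - 1.
+static int polar_shmem_index(int iw, int offset, int rel_x, int rel_y, int x, int y)
+{
+    return iw * (rel_y + y + offset) + (rel_x + x + offset);
+}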
+
+static void bicubic_calcweights(struct gl_shader_cache *sc, const char *t, const char *s)
+{
+ // Explanation of how bicubic scaling with only 4 texel fetches is done:
+ // http://www.mate.tue.nl/mate/pdfs/10318.pdf
+ // 'Efficient GPU-Based Texture Interpolation using Uniform B-Splines'
+ // Explanation why this algorithm normally always blurs, even with unit
+ // scaling:
+ // http://bigwww.epfl.ch/preprints/ruijters1001p.pdf
+ // 'GPU Prefilter for Accurate Cubic B-spline Interpolation'
+ GLSLF("vec4 %s = vec4(-0.5, 0.1666, 0.3333, -0.3333) * %s"
+ " + vec4(1, 0, -0.5, 0.5);\n", t, s);
+ GLSLF("%s = %s * %s + vec4(0, 0, -0.5, 0.5);\n", t, t, s);
+ GLSLF("%s = %s * %s + vec4(-0.6666, 0, 0.8333, 0.1666);\n", t, t, s);
+ GLSLF("%s.xy *= vec2(1, 1) / vec2(%s.z, %s.w);\n", t, t, t);
+ GLSLF("%s.xy += vec2(1.0 + %s, 1.0 - %s);\n", t, s, s);
+}
+
+void pass_sample_bicubic_fast(struct gl_shader_cache *sc)
+{
+ GLSLF("{\n");
+ GLSL(vec2 fcoord = fract(pos * size + vec2(0.5, 0.5));)
+ bicubic_calcweights(sc, "parmx", "fcoord.x");
+ bicubic_calcweights(sc, "parmy", "fcoord.y");
+ GLSL(vec4 cdelta;)
+ GLSL(cdelta.xz = parmx.rg * vec2(-pt.x, pt.x);)
+ GLSL(cdelta.yw = parmy.rg * vec2(-pt.y, pt.y);)
+ // first y-interpolation
+ GLSL(vec4 ar = texture(tex, pos + cdelta.xy);)
+ GLSL(vec4 ag = texture(tex, pos + cdelta.xw);)
+ GLSL(vec4 ab = mix(ag, ar, parmy.b);)
+ // second y-interpolation
+ GLSL(vec4 br = texture(tex, pos + cdelta.zy);)
+ GLSL(vec4 bg = texture(tex, pos + cdelta.zw);)
+ GLSL(vec4 aa = mix(bg, br, parmy.b);)
+ // x-interpolation
+ GLSL(color = mix(aa, ab, parmx.b);)
+ GLSLF("}\n");
+}
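+
+// [Editor's note] Illustrative sketch, not part of mpv: the 1D idea behind
+// bicubic_calcweights()/pass_sample_bicubic_fast() above, following the papers
+// referenced there. The four cubic B-spline taps at -1..+2 are folded into two
+// bilinearly filtered fetches; 't' is the fractional texel position and the
+// returned offsets are relative to the integer texel index. Names are
+// hypothetical.
+struct bspline_fetch { float offset[2], weight[2]; };
+static struct bspline_fetch bspline_fetch_1d(float t)
+{
+    // Uniform cubic B-spline weights for the taps at -1, 0, +1, +2
+    float w0 = (1.0f/6.0f) * (1-t)*(1-t)*(1-t);
+    float w1 = (1.0f/6.0f) * (3*t*t*t - 6*t*t + 4);
+    float w2 = (1.0f/6.0f) * (-3*t*t*t + 3*t*t + 3*t + 1);
+    float w3 = (1.0f/6.0f) * t*t*t;
+    struct bspline_fetch f;
+    f.weight[0] = w0 + w1;                  // taps -1 and 0 combined
+    f.weight[1] = w2 + w3;                  // taps +1 and +2 combined
+    f.offset[0] = -1.0f + w1 / f.weight[0]; // fetch between taps -1 and 0
+    f.offset[1] =  1.0f + w3 / f.weight[1]; // fetch between taps +1 and +2
+    return f;
+}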
+
+void pass_sample_oversample(struct gl_shader_cache *sc, struct scaler *scaler,
+ int w, int h)
+{
+ GLSLF("{\n");
+ GLSL(vec2 pos = pos - vec2(0.5) * pt;) // round to nearest
+ GLSL(vec2 fcoord = fract(pos * size - vec2(0.5));)
+ // Determine the mixing coefficient vector
+ gl_sc_uniform_vec2(sc, "output_size", (float[2]){w, h});
+ GLSL(vec2 coeff = fcoord * output_size/size;)
+ float threshold = scaler->conf.kernel.params[0];
+ threshold = isnan(threshold) ? 0.0 : threshold;
+ GLSLF("coeff = (coeff - %f) * 1.0/%f;\n", threshold, 1.0 - 2 * threshold);
+ GLSL(coeff = clamp(coeff, 0.0, 1.0);)
+ // Compute the right blend of colors
+ GLSL(color = texture(tex, pos + pt * (coeff - fcoord));)
+ GLSLF("}\n");
+}
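+
+// [Editor's note] Illustrative sketch, not part of mpv: the per-axis blend
+// coefficient computed above, in scalar C. 'ratio' is output_size/size (the
+// scale factor), 'fcoord' the fractional source position and 'threshold' the
+// scaler's first parameter (0.0 when unset). The helper name is hypothetical.
+static float oversample_coeff(float fcoord, float ratio, float threshold)
+{
+    float c = (fcoord * ratio - threshold) / (1.0f - 2.0f * threshold);
+    return c < 0.0f ? 0.0f : (c > 1.0f ? 1.0f : c); // clamp to [0,1]
+}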
+
+// Common constants for SMPTE ST.2084 (HDR)
+static const float PQ_M1 = 2610./4096 * 1./4,
+ PQ_M2 = 2523./4096 * 128,
+ PQ_C1 = 3424./4096,
+ PQ_C2 = 2413./4096 * 32,
+ PQ_C3 = 2392./4096 * 32;
+
+// Common constants for ARIB STD-B67 (HLG)
+static const float HLG_A = 0.17883277,
+ HLG_B = 0.28466892,
+ HLG_C = 0.55991073;
+
+// Common constants for Panasonic V-Log
+static const float VLOG_B = 0.00873,
+ VLOG_C = 0.241514,
+ VLOG_D = 0.598206;
+
+// Common constants for Sony S-Log
+static const float SLOG_A = 0.432699,
+ SLOG_B = 0.037584,
+ SLOG_C = 0.616596 + 0.03,
+ SLOG_P = 3.538813,
+ SLOG_Q = 0.030001,
+ SLOG_K2 = 155.0 / 219.0;
+
+// Linearize (expand), given a TRC as input. In essence, this is the ITU-R
+// EOTF, calculated on an idealized (reference) monitor with a white point of
+// MP_REF_WHITE and infinite contrast.
+//
+// These functions always output to a normalized scale of [0,1], for the
+// convenience of the video.c code that calls them. To get the values on an
+// absolute scale, multiply the result by `mp_trc_nom_peak(trc)`.
+void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc)
+{
+ if (trc == MP_CSP_TRC_LINEAR)
+ return;
+
+ GLSLF("// linearize\n");
+
+ // Note that this clamp may technically violate the definition of
+ // ITU-R BT.2100, which allows for sub-blacks and super-whites to be
+ // displayed on the display where such would be possible. That said, the
+ // problem is that not all gamma curves are well-defined on the values
+ // outside this range, so we ignore it and just clip anyway for sanity.
+ GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
+
+ switch (trc) {
+ case MP_CSP_TRC_SRGB:
+ GLSLF("color.rgb = mix(color.rgb * vec3(1.0/12.92), \n"
+ " pow((color.rgb + vec3(0.055))/vec3(1.055), vec3(2.4)), \n"
+ " %s(lessThan(vec3(0.04045), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_BT_1886:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.4));)
+ break;
+ case MP_CSP_TRC_GAMMA18:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.8));)
+ break;
+ case MP_CSP_TRC_GAMMA20:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.0));)
+ break;
+ case MP_CSP_TRC_GAMMA22:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.2));)
+ break;
+ case MP_CSP_TRC_GAMMA24:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.4));)
+ break;
+ case MP_CSP_TRC_GAMMA26:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.6));)
+ break;
+ case MP_CSP_TRC_GAMMA28:
+ GLSL(color.rgb = pow(color.rgb, vec3(2.8));)
+ break;
+ case MP_CSP_TRC_PRO_PHOTO:
+ GLSLF("color.rgb = mix(color.rgb * vec3(1.0/16.0), \n"
+ " pow(color.rgb, vec3(1.8)), \n"
+ " %s(lessThan(vec3(0.03125), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_PQ:
+ GLSLF("color.rgb = pow(color.rgb, vec3(1.0/%f));\n", PQ_M2);
+ GLSLF("color.rgb = max(color.rgb - vec3(%f), vec3(0.0)) \n"
+ " / (vec3(%f) - vec3(%f) * color.rgb);\n",
+ PQ_C1, PQ_C2, PQ_C3);
+ GLSLF("color.rgb = pow(color.rgb, vec3(%f));\n", 1.0 / PQ_M1);
+ // PQ's output range is 0-10000, but we need it to be relative to
+ // MP_REF_WHITE instead, so rescale
+ GLSLF("color.rgb *= vec3(%f);\n", 10000 / MP_REF_WHITE);
+ break;
+ case MP_CSP_TRC_HLG:
+ GLSLF("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,\n"
+ " exp((color.rgb - vec3(%f)) * vec3(1.0/%f)) + vec3(%f),\n"
+ " %s(lessThan(vec3(0.5), color.rgb)));\n",
+ HLG_C, HLG_A, HLG_B, gl_sc_bvec(sc, 3));
+ GLSLF("color.rgb *= vec3(1.0/%f);\n", MP_REF_WHITE_HLG);
+ break;
+ case MP_CSP_TRC_V_LOG:
+ GLSLF("color.rgb = mix((color.rgb - vec3(0.125)) * vec3(1.0/5.6), \n"
+ " pow(vec3(10.0), (color.rgb - vec3(%f)) * vec3(1.0/%f)) \n"
+ " - vec3(%f), \n"
+ " %s(lessThanEqual(vec3(0.181), color.rgb))); \n",
+ VLOG_D, VLOG_C, VLOG_B, gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_S_LOG1:
+ GLSLF("color.rgb = pow(vec3(10.0), (color.rgb - vec3(%f)) * vec3(1.0/%f))\n"
+ " - vec3(%f);\n",
+ SLOG_C, SLOG_A, SLOG_B);
+ break;
+ case MP_CSP_TRC_S_LOG2:
+ GLSLF("color.rgb = mix((color.rgb - vec3(%f)) * vec3(1.0/%f), \n"
+ " (pow(vec3(10.0), (color.rgb - vec3(%f)) * vec3(1.0/%f)) \n"
+ " - vec3(%f)) * vec3(1.0/%f), \n"
+ " %s(lessThanEqual(vec3(%f), color.rgb))); \n",
+ SLOG_Q, SLOG_P, SLOG_C, SLOG_A, SLOG_B, SLOG_K2, gl_sc_bvec(sc, 3), SLOG_Q);
+ break;
+ case MP_CSP_TRC_ST428:
+ GLSL(color.rgb = vec3(52.37/48.0) * pow(color.rgb, vec3(2.6)););
+ break;
+ default:
+ abort();
+ }
+
+ // Rescale to prevent clipping on non-float textures
+ GLSLF("color.rgb *= vec3(1.0/%f);\n", mp_trc_nom_peak(trc));
+}
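+
+// [Editor's note] Illustrative sketch, not part of mpv: a scalar reference of
+// the PQ (SMPTE ST.2084) branch above, using the same constants and stopping
+// before the final mp_trc_nom_peak() rescale. Assumes the usual reference
+// white of 203 cd/m^2 for MP_REF_WHITE; the helper name is hypothetical.
+static float pq_eotf_ref(float e) // e: non-linear PQ signal in [0,1]
+{
+    float p = powf(e, 1.0f / PQ_M2);
+    float num = fmaxf(p - PQ_C1, 0.0f);
+    float lin = powf(num / (PQ_C2 - PQ_C3 * p), 1.0f / PQ_M1); // 1.0 == 10000 nits
+    return lin * (10000.0f / 203.0f); // rescale so 1.0 == reference white
+}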
+
+// Delinearize (compress), given a TRC as output. This corresponds to the
+// inverse EOTF (not the OETF) in ITU-R terminology, again assuming a
+// reference monitor.
+//
+// Like pass_linearize, this function ingests values on a normalized scale.
+void pass_delinearize(struct gl_shader_cache *sc, enum mp_csp_trc trc)
+{
+ if (trc == MP_CSP_TRC_LINEAR)
+ return;
+
+ GLSLF("// delinearize\n");
+ GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
+ GLSLF("color.rgb *= vec3(%f);\n", mp_trc_nom_peak(trc));
+
+ switch (trc) {
+ case MP_CSP_TRC_SRGB:
+ GLSLF("color.rgb = mix(color.rgb * vec3(12.92), \n"
+ " vec3(1.055) * pow(color.rgb, vec3(1.0/2.4)) \n"
+ " - vec3(0.055), \n"
+ " %s(lessThanEqual(vec3(0.0031308), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_BT_1886:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.4));)
+ break;
+ case MP_CSP_TRC_GAMMA18:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/1.8));)
+ break;
+ case MP_CSP_TRC_GAMMA20:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.0));)
+ break;
+ case MP_CSP_TRC_GAMMA22:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.2));)
+ break;
+ case MP_CSP_TRC_GAMMA24:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.4));)
+ break;
+ case MP_CSP_TRC_GAMMA26:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.6));)
+ break;
+ case MP_CSP_TRC_GAMMA28:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.8));)
+ break;
+ case MP_CSP_TRC_PRO_PHOTO:
+ GLSLF("color.rgb = mix(color.rgb * vec3(16.0), \n"
+ " pow(color.rgb, vec3(1.0/1.8)), \n"
+ " %s(lessThanEqual(vec3(0.001953), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_PQ:
+ GLSLF("color.rgb *= vec3(1.0/%f);\n", 10000 / MP_REF_WHITE);
+ GLSLF("color.rgb = pow(color.rgb, vec3(%f));\n", PQ_M1);
+ GLSLF("color.rgb = (vec3(%f) + vec3(%f) * color.rgb) \n"
+ " / (vec3(1.0) + vec3(%f) * color.rgb);\n",
+ PQ_C1, PQ_C2, PQ_C3);
+ GLSLF("color.rgb = pow(color.rgb, vec3(%f));\n", PQ_M2);
+ break;
+ case MP_CSP_TRC_HLG:
+ GLSLF("color.rgb *= vec3(%f);\n", MP_REF_WHITE_HLG);
+ GLSLF("color.rgb = mix(vec3(0.5) * sqrt(color.rgb),\n"
+ " vec3(%f) * log(color.rgb - vec3(%f)) + vec3(%f),\n"
+ " %s(lessThan(vec3(1.0), color.rgb)));\n",
+ HLG_A, HLG_B, HLG_C, gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_V_LOG:
+ GLSLF("color.rgb = mix(vec3(5.6) * color.rgb + vec3(0.125), \n"
+ " vec3(%f) * log(color.rgb + vec3(%f)) \n"
+ " + vec3(%f), \n"
+ " %s(lessThanEqual(vec3(0.01), color.rgb))); \n",
+ VLOG_C / M_LN10, VLOG_B, VLOG_D, gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_S_LOG1:
+ GLSLF("color.rgb = vec3(%f) * log(color.rgb + vec3(%f)) + vec3(%f);\n",
+ SLOG_A / M_LN10, SLOG_B, SLOG_C);
+ break;
+ case MP_CSP_TRC_S_LOG2:
+ GLSLF("color.rgb = mix(vec3(%f) * color.rgb + vec3(%f), \n"
+ " vec3(%f) * log(vec3(%f) * color.rgb + vec3(%f)) \n"
+ " + vec3(%f), \n"
+ " %s(lessThanEqual(vec3(0.0), color.rgb))); \n",
+ SLOG_P, SLOG_Q, SLOG_A / M_LN10, SLOG_K2, SLOG_B, SLOG_C, gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_TRC_ST428:
+ GLSL(color.rgb = pow(color.rgb * vec3(48.0/52.37), vec3(1.0/2.6)););
+ break;
+ default:
+ abort();
+ }
+}
+
+// Apply the OOTF mapping from a given light type to display-referred light.
+// Assumes absolute scale values. `peak` is used to tune the OOTF where
+// applicable (currently only HLG).
+static void pass_ootf(struct gl_shader_cache *sc, enum mp_csp_light light,
+ float peak)
+{
+ if (light == MP_CSP_LIGHT_DISPLAY)
+ return;
+
+ GLSLF("// apply ootf\n");
+
+ switch (light)
+ {
+ case MP_CSP_LIGHT_SCENE_HLG: {
+ // HLG OOTF from BT.2100, scaled to the chosen display peak
+ float gamma = MPMAX(1.0, 1.2 + 0.42 * log10(peak * MP_REF_WHITE / 1000.0));
+ GLSLF("color.rgb *= vec3(%f * pow(dot(src_luma, color.rgb), %f));\n",
+ peak / pow(12.0 / MP_REF_WHITE_HLG, gamma), gamma - 1.0);
+ break;
+ }
+ case MP_CSP_LIGHT_SCENE_709_1886:
+ // This OOTF is defined by encoding the result as 709 and then decoding
+ // it as 1886; although this is called 709_1886 we actually use the
+ // more precise (by one decimal) values from BT.2020 instead
+ GLSLF("color.rgb = mix(color.rgb * vec3(4.5), \n"
+ " vec3(1.0993) * pow(color.rgb, vec3(0.45)) - vec3(0.0993), \n"
+ " %s(lessThan(vec3(0.0181), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ GLSL(color.rgb = pow(color.rgb, vec3(2.4));)
+ break;
+ case MP_CSP_LIGHT_SCENE_1_2:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.2));)
+ break;
+ default:
+ abort();
+ }
+}
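+
+// [Editor's note] Illustrative sketch, not part of mpv: the HLG system gamma
+// used in the SCENE_HLG case above, per BT.2100, with the display peak given
+// in cd/m^2 (the code above receives the peak relative to MP_REF_WHITE and
+// multiplies that factor back in). The helper name is hypothetical.
+static float hlg_system_gamma(float display_peak_nits)
+{
+    float g = 1.2f + 0.42f * log10f(display_peak_nits / 1000.0f);
+    return g < 1.0f ? 1.0f : g; // e.g. 1000 nits -> 1.2, 400 nits -> ~1.03
+}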
+
+// Inverse of the function pass_ootf, for completeness' sake.
+static void pass_inverse_ootf(struct gl_shader_cache *sc, enum mp_csp_light light,
+ float peak)
+{
+ if (light == MP_CSP_LIGHT_DISPLAY)
+ return;
+
+ GLSLF("// apply inverse ootf\n");
+
+ switch (light)
+ {
+ case MP_CSP_LIGHT_SCENE_HLG: {
+ float gamma = MPMAX(1.0, 1.2 + 0.42 * log10(peak * MP_REF_WHITE / 1000.0));
+ GLSLF("color.rgb *= vec3(1.0/%f);\n", peak / pow(12.0 / MP_REF_WHITE_HLG, gamma));
+ GLSLF("color.rgb /= vec3(max(1e-6, pow(dot(src_luma, color.rgb), %f)));\n",
+ (gamma - 1.0) / gamma);
+ break;
+ }
+ case MP_CSP_LIGHT_SCENE_709_1886:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/2.4));)
+ GLSLF("color.rgb = mix(color.rgb * vec3(1.0/4.5), \n"
+ " pow((color.rgb + vec3(0.0993)) * vec3(1.0/1.0993), \n"
+ " vec3(1/0.45)), \n"
+ " %s(lessThan(vec3(0.08145), color.rgb))); \n",
+ gl_sc_bvec(sc, 3));
+ break;
+ case MP_CSP_LIGHT_SCENE_1_2:
+ GLSL(color.rgb = pow(color.rgb, vec3(1.0/1.2));)
+ break;
+ default:
+ abort();
+ }
+}
+
+// Average light level for SDR signals. This is equal to a signal level of 0.5
+// under a typical presentation gamma of about 2.0.
+static const float sdr_avg = 0.25;
+
+static void hdr_update_peak(struct gl_shader_cache *sc,
+ const struct gl_tone_map_opts *opts)
+{
+ // Update the sig_peak/sig_avg from the old SSBO state
+ GLSL(if (average.y > 0.0) {)
+ GLSL( sig_avg = max(1e-3, average.x);)
+ GLSL( sig_peak = max(1.00, average.y);)
+ GLSL(})
+
+ // Chosen to avoid overflowing on an 8K buffer
+ const float log_min = 1e-3, log_scale = 400.0, sig_scale = 10000.0;
+
+ // For performance, and to avoid overflows, we tally up the sub-results per
+ // pixel using shared memory first
+ GLSLH(shared int wg_sum;)
+ GLSLH(shared uint wg_max;)
+ GLSL(wg_sum = 0; wg_max = 0u;)
+ GLSL(barrier();)
+ GLSLF("float sig_log = log(max(sig_max, %f));\n", log_min);
+ GLSLF("atomicAdd(wg_sum, int(sig_log * %f));\n", log_scale);
+ GLSLF("atomicMax(wg_max, uint(sig_max * %f));\n", sig_scale);
+
+ // Have one thread per work group update the global atomics
+ GLSL(memoryBarrierShared();)
+ GLSL(barrier();)
+ GLSL(if (gl_LocalInvocationIndex == 0u) {)
+ GLSL( int wg_avg = wg_sum / int(gl_WorkGroupSize.x * gl_WorkGroupSize.y);)
+ GLSL( atomicAdd(frame_sum, wg_avg);)
+ GLSL( atomicMax(frame_max, wg_max);)
+ GLSL( memoryBarrierBuffer();)
+ GLSL(})
+ GLSL(barrier();)
+
+ // Finally, to update the global state, we increment a counter per dispatch
+ GLSL(uint num_wg = gl_NumWorkGroups.x * gl_NumWorkGroups.y;)
+ GLSL(if (gl_LocalInvocationIndex == 0u && atomicAdd(counter, 1u) == num_wg - 1u) {)
+ GLSL( counter = 0u;)
+ GLSL( vec2 cur = vec2(float(frame_sum) / float(num_wg), frame_max);)
+ GLSLF(" cur *= vec2(1.0/%f, 1.0/%f);\n", log_scale, sig_scale);
+ GLSL( cur.x = exp(cur.x);)
+ GLSL( if (average.y == 0.0))
+ GLSL( average = cur;)
+
+ // Use an IIR low-pass filter to smooth out the detected values, with a
+ // configurable decay rate based on the desired time constant (tau)
+ if (opts->decay_rate) {
+ float decay = 1.0f - expf(-1.0f / opts->decay_rate);
+ GLSLF(" average += %f * (cur - average);\n", decay);
+ } else {
+ GLSLF(" average = cur;\n");
+ }
+
+ // Scene change hysteresis
+ float log_db = 10.0 / log(10.0);
+ GLSLF(" float weight = smoothstep(%f, %f, abs(log(cur.x / average.x)));\n",
+ opts->scene_threshold_low / log_db,
+ opts->scene_threshold_high / log_db);
+ GLSL( average = mix(average, cur, weight);)
+
+ // Reset SSBO state for the next frame
+ GLSL( frame_sum = 0; frame_max = 0u;)
+ GLSL( memoryBarrierBuffer();)
+ GLSL(})
+}
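+
+// [Editor's note] Illustrative sketch, not part of mpv: scalar form of the
+// IIR low-pass used above to smooth the detected peak/average. 'rate' plays
+// the role of opts->decay_rate (a time constant in frames) and the function
+// is meant to be applied once per frame. The helper name is hypothetical.
+static float iir_smooth(float state, float measured, float rate)
+{
+    float decay = 1.0f - expf(-1.0f / rate); // weight given to the new sample
+    return state + decay * (measured - state);
+}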
+
+static inline float pq_delinearize(float x)
+{
+ x *= MP_REF_WHITE / 10000.0;
+ x = powf(x, PQ_M1);
+ x = (PQ_C1 + PQ_C2 * x) / (1.0 + PQ_C3 * x);
+ x = pow(x, PQ_M2);
+ return x;
+}
+
+// Tone map from a known peak brightness to the range [0,1]. If ref_peak
+// is 0, we will use peak detection instead
+static void pass_tone_map(struct gl_shader_cache *sc,
+ float src_peak, float dst_peak,
+ const struct gl_tone_map_opts *opts)
+{
+ GLSLF("// HDR tone mapping\n");
+
+ // To prevent discoloration due to out-of-bounds clipping, we need to make
+ // sure to reduce the value range as far as necessary to keep the entire
+ // signal in range, so tone map based on the brightest component.
+ GLSL(int sig_idx = 0;)
+ GLSL(if (color[1] > color[sig_idx]) sig_idx = 1;)
+ GLSL(if (color[2] > color[sig_idx]) sig_idx = 2;)
+ GLSL(float sig_max = color[sig_idx];)
+ GLSLF("float sig_peak = %f;\n", src_peak);
+ GLSLF("float sig_avg = %f;\n", sdr_avg);
+
+ if (opts->compute_peak >= 0)
+ hdr_update_peak(sc, opts);
+
+ // Always hard-clip the upper bound of the signal range to avoid functions
+ // exploding on inputs greater than 1.0
+ GLSLF("vec3 sig = min(color.rgb, sig_peak);\n");
+
+ // This function always operates on an absolute scale, so ignore the
+ // dst_peak normalization for it
+ float dst_scale = dst_peak;
+ enum tone_mapping curve = opts->curve ? opts->curve : TONE_MAPPING_BT_2390;
+ if (curve == TONE_MAPPING_BT_2390)
+ dst_scale = 1.0;
+
+    // Rescale the variables in order to bring them into a representation where
+ // 1.0 represents the dst_peak. This is because all of the tone mapping
+ // algorithms are defined in such a way that they map to the range [0.0, 1.0].
+ if (dst_scale > 1.0) {
+ GLSLF("sig *= 1.0/%f;\n", dst_scale);
+ GLSLF("sig_peak *= 1.0/%f;\n", dst_scale);
+ }
+
+ GLSL(float sig_orig = sig[sig_idx];)
+ GLSLF("float slope = min(%f, %f / sig_avg);\n", opts->max_boost, sdr_avg);
+ GLSL(sig *= slope;)
+ GLSL(sig_peak *= slope;)
+
+ float param = opts->curve_param;
+ switch (curve) {
+ case TONE_MAPPING_CLIP:
+ GLSLF("sig = min(%f * sig, 1.0);\n", isnan(param) ? 1.0 : param);
+ break;
+
+ case TONE_MAPPING_MOBIUS:
+ GLSLF("if (sig_peak > (1.0 + 1e-6)) {\n");
+ GLSLF("const float j = %f;\n", isnan(param) ? 0.3 : param);
+ // solve for M(j) = j; M(sig_peak) = 1.0; M'(j) = 1.0
+ // where M(x) = scale * (x+a)/(x+b)
+ GLSLF("float a = -j*j * (sig_peak - 1.0) / (j*j - 2.0*j + sig_peak);\n");
+ GLSLF("float b = (j*j - 2.0*j*sig_peak + sig_peak) / "
+ "max(1e-6, sig_peak - 1.0);\n");
+ GLSLF("float scale = (b*b + 2.0*b*j + j*j) / (b-a);\n");
+ GLSLF("sig = mix(sig, scale * (sig + vec3(a)) / (sig + vec3(b)),"
+ " %s(greaterThan(sig, vec3(j))));\n",
+ gl_sc_bvec(sc, 3));
+ GLSLF("}\n");
+ break;
+
+ case TONE_MAPPING_REINHARD: {
+ float contrast = isnan(param) ? 0.5 : param,
+ offset = (1.0 - contrast) / contrast;
+ GLSLF("sig = sig / (sig + vec3(%f));\n", offset);
+ GLSLF("float scale = (sig_peak + %f) / sig_peak;\n", offset);
+ GLSL(sig *= scale;)
+ break;
+ }
+
+ case TONE_MAPPING_HABLE: {
+ float A = 0.15, B = 0.50, C = 0.10, D = 0.20, E = 0.02, F = 0.30;
+ GLSLHF("vec3 hable(vec3 x) {\n");
+ GLSLHF("return (x * (%f*x + vec3(%f)) + vec3(%f)) / "
+ " (x * (%f*x + vec3(%f)) + vec3(%f)) "
+ " - vec3(%f);\n",
+ A, C*B, D*E,
+ A, B, D*F,
+ E/F);
+ GLSLHF("}\n");
+ GLSLF("sig = hable(max(vec3(0.0), sig)) / hable(vec3(sig_peak)).x;\n");
+ break;
+ }
+
+ case TONE_MAPPING_GAMMA: {
+ float gamma = isnan(param) ? 1.8 : param;
+ GLSLF("const float cutoff = 0.05, gamma = 1.0/%f;\n", gamma);
+ GLSL(float scale = pow(cutoff / sig_peak, gamma.x) / cutoff;)
+ GLSLF("sig = mix(scale * sig,"
+ " pow(sig / sig_peak, vec3(gamma)),"
+ " %s(greaterThan(sig, vec3(cutoff))));\n",
+ gl_sc_bvec(sc, 3));
+ break;
+ }
+
+ case TONE_MAPPING_LINEAR: {
+ float coeff = isnan(param) ? 1.0 : param;
+ GLSLF("sig = min(%f / sig_peak, 1.0) * sig;\n", coeff);
+ break;
+ }
+
+ case TONE_MAPPING_BT_2390:
+ // We first need to encode both sig and sig_peak into PQ space
+ GLSLF("vec4 sig_pq = vec4(sig.rgb, sig_peak); \n"
+ "sig_pq *= vec4(1.0/%f); \n"
+ "sig_pq = pow(sig_pq, vec4(%f)); \n"
+ "sig_pq = (vec4(%f) + vec4(%f) * sig_pq) \n"
+ " / (vec4(1.0) + vec4(%f) * sig_pq); \n"
+ "sig_pq = pow(sig_pq, vec4(%f)); \n",
+ 10000.0 / MP_REF_WHITE, PQ_M1, PQ_C1, PQ_C2, PQ_C3, PQ_M2);
+ // Encode both the signal and the target brightness to be relative to
+ // the source peak brightness, and figure out the target peak in this space
+ GLSLF("float scale = 1.0 / sig_pq.a; \n"
+ "sig_pq.rgb *= vec3(scale); \n"
+ "float maxLum = %f * scale; \n",
+ pq_delinearize(dst_peak));
+ // Apply piece-wise hermite spline
+ GLSLF("float ks = 1.5 * maxLum - 0.5; \n"
+ "vec3 tb = (sig_pq.rgb - vec3(ks)) / vec3(1.0 - ks); \n"
+ "vec3 tb2 = tb * tb; \n"
+ "vec3 tb3 = tb2 * tb; \n"
+ "vec3 pb = (2.0 * tb3 - 3.0 * tb2 + vec3(1.0)) * vec3(ks) + \n"
+ " (tb3 - 2.0 * tb2 + tb) * vec3(1.0 - ks) + \n"
+ " (-2.0 * tb3 + 3.0 * tb2) * vec3(maxLum); \n"
+ "sig = mix(pb, sig_pq.rgb, %s(lessThan(sig_pq.rgb, vec3(ks)))); \n",
+ gl_sc_bvec(sc, 3));
+ // Convert back from PQ space to linear light
+ GLSLF("sig *= vec3(sig_pq.a); \n"
+ "sig = pow(sig, vec3(1.0/%f)); \n"
+ "sig = max(sig - vec3(%f), 0.0) / \n"
+ " (vec3(%f) - vec3(%f) * sig); \n"
+ "sig = pow(sig, vec3(1.0/%f)); \n"
+ "sig *= vec3(%f); \n",
+ PQ_M2, PQ_C1, PQ_C2, PQ_C3, PQ_M1, 10000.0 / MP_REF_WHITE);
+ break;
+
+ default:
+ abort();
+ }
+
+ GLSLF("float coeff = max(sig[sig_idx] - %f, 1e-6) / \n"
+ " max(sig[sig_idx], 1.0); \n"
+ "coeff = %f * pow(coeff / %f, %f); \n"
+ "color.rgb *= sig[sig_idx] / sig_orig; \n"
+ "color.rgb = mix(color.rgb, %f * sig, coeff); \n",
+ 0.18 / dst_scale, 0.90, dst_scale, 0.20, dst_scale);
+}
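+
+// [Editor's note] Illustrative sketch, not part of mpv: scalar form of the
+// Reinhard branch above. 'contrast' is the curve parameter (0.5 when unset);
+// 'sig' and 'sig_peak' are in the space where 1.0 is the target peak. The
+// helper name is hypothetical.
+static float reinhard_tonemap_ref(float sig, float sig_peak, float contrast)
+{
+    float offset = (1.0f - contrast) / contrast;
+    float scale = (sig_peak + offset) / sig_peak; // so that sig_peak maps to 1.0
+    return scale * (sig / (sig + offset));
+}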
+
+// Map colors from one source space to another. These source spaces must be
+// known (i.e. not MP_CSP_*_AUTO), as this function won't perform any
+// auto-guessing. If is_linear is true, we assume the input has already been
+// linearized (e.g. for linear-scaling). If `opts->compute_peak` is true, we
+// will detect the peak instead of relying on metadata. Note that this requires
+// the caller to have already bound the appropriate SSBO and set up the compute
+// shader metadata
+void pass_color_map(struct gl_shader_cache *sc, bool is_linear,
+ struct mp_colorspace src, struct mp_colorspace dst,
+ const struct gl_tone_map_opts *opts)
+{
+ GLSLF("// color mapping\n");
+
+ // Some operations need access to the video's luma coefficients, so make
+ // them available
+ float rgb2xyz[3][3];
+ mp_get_rgb2xyz_matrix(mp_get_csp_primaries(src.primaries), rgb2xyz);
+ gl_sc_uniform_vec3(sc, "src_luma", rgb2xyz[1]);
+ mp_get_rgb2xyz_matrix(mp_get_csp_primaries(dst.primaries), rgb2xyz);
+ gl_sc_uniform_vec3(sc, "dst_luma", rgb2xyz[1]);
+
+ bool need_ootf = src.light != dst.light;
+ if (src.light == MP_CSP_LIGHT_SCENE_HLG && src.hdr.max_luma != dst.hdr.max_luma)
+ need_ootf = true;
+
+ // All operations from here on require linear light as a starting point,
+ // so we linearize even if src.gamma == dst.gamma when one of the other
+ // operations needs it
+ bool need_linear = src.gamma != dst.gamma ||
+ src.primaries != dst.primaries ||
+ src.hdr.max_luma != dst.hdr.max_luma ||
+ need_ootf;
+
+ if (need_linear && !is_linear) {
+ // We also pull it up so that 1.0 is the reference white
+ pass_linearize(sc, src.gamma);
+ is_linear = true;
+ }
+
+ // Pre-scale the incoming values into an absolute scale
+ GLSLF("color.rgb *= vec3(%f);\n", mp_trc_nom_peak(src.gamma));
+
+ if (need_ootf)
+ pass_ootf(sc, src.light, src.hdr.max_luma / MP_REF_WHITE);
+
+ // Tone map to prevent clipping due to excessive brightness
+ if (src.hdr.max_luma > dst.hdr.max_luma) {
+ pass_tone_map(sc, src.hdr.max_luma / MP_REF_WHITE,
+ dst.hdr.max_luma / MP_REF_WHITE, opts);
+ }
+
+ // Adapt to the right colorspace if necessary
+ if (src.primaries != dst.primaries) {
+ struct mp_csp_primaries csp_src = mp_get_csp_primaries(src.primaries),
+ csp_dst = mp_get_csp_primaries(dst.primaries);
+ float m[3][3] = {{0}};
+ mp_get_cms_matrix(csp_src, csp_dst, MP_INTENT_RELATIVE_COLORIMETRIC, m);
+ gl_sc_uniform_mat3(sc, "cms_matrix", true, &m[0][0]);
+ GLSL(color.rgb = cms_matrix * color.rgb;)
+
+ if (!opts->gamut_mode || opts->gamut_mode == GAMUT_DESATURATE) {
+ GLSL(float cmin = min(min(color.r, color.g), color.b);)
+ GLSL(if (cmin < 0.0) {
+ float luma = dot(dst_luma, color.rgb);
+ float coeff = cmin / (cmin - luma);
+ color.rgb = mix(color.rgb, vec3(luma), coeff);
+ })
+ GLSLF("float cmax = 1.0/%f * max(max(color.r, color.g), color.b);\n",
+ dst.hdr.max_luma / MP_REF_WHITE);
+ GLSL(if (cmax > 1.0) color.rgb /= cmax;)
+ }
+ }
+
+ if (need_ootf)
+ pass_inverse_ootf(sc, dst.light, dst.hdr.max_luma / MP_REF_WHITE);
+
+ // Post-scale the outgoing values from absolute scale to normalized.
+ // For SDR, we normalize to the chosen signal peak. For HDR, we normalize
+ // to the encoding range of the transfer function.
+ float dst_range = dst.hdr.max_luma / MP_REF_WHITE;
+ if (mp_trc_is_hdr(dst.gamma))
+ dst_range = mp_trc_nom_peak(dst.gamma);
+
+ GLSLF("color.rgb *= vec3(%f);\n", 1.0 / dst_range);
+
+ // Warn for remaining out-of-gamut colors if enabled
+ if (opts->gamut_mode == GAMUT_WARN) {
+ GLSL(if (any(greaterThan(color.rgb, vec3(1.005))) ||
+ any(lessThan(color.rgb, vec3(-0.005)))))
+ GLSL(color.rgb = vec3(1.0) - color.rgb;) // invert
+ }
+
+ if (is_linear)
+ pass_delinearize(sc, dst.gamma);
+}
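+
+// [Editor's note] Illustrative sketch, not part of mpv: the gamut-desaturation
+// step above for a single pixel, in scalar C. Channels driven negative by the
+// primaries conversion are blended towards luma just enough to bring the
+// minimum channel back to zero. The helper name is hypothetical.
+static void gamut_desaturate_ref(float rgb[3], const float dst_luma[3])
+{
+    float cmin = fminf(rgb[0], fminf(rgb[1], rgb[2]));
+    if (cmin < 0.0f) {
+        float luma = dst_luma[0]*rgb[0] + dst_luma[1]*rgb[1] + dst_luma[2]*rgb[2];
+        float coeff = cmin / (cmin - luma);
+        for (int i = 0; i < 3; i++)
+            rgb[i] += coeff * (luma - rgb[i]); // mix(rgb, vec3(luma), coeff)
+    }
+}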
+
+// Wide usage friendly PRNG, shamelessly stolen from a GLSL tricks forum post.
+// Obtain random numbers by calling rand(h), followed by h = permute(h) to
+// update the state. Assumes the texture was hooked.
+// permute() was modified from the original to avoid "large" numbers in
+// calculations, since low-end mobile GPUs choke on them (overflow).
+static void prng_init(struct gl_shader_cache *sc, AVLFG *lfg)
+{
+ GLSLH(float mod289(float x) { return x - floor(x * 1.0/289.0) * 289.0; })
+ GLSLHF("float permute(float x) {\n");
+ GLSLH(return mod289( mod289(34.0*x + 1.0) * (fract(x) + 1.0) );)
+ GLSLHF("}\n");
+ GLSLH(float rand(float x) { return fract(x * 1.0/41.0); })
+
+ // Initialize the PRNG by hashing the position + a random uniform
+ GLSL(vec3 _m = vec3(HOOKED_pos, random) + vec3(1.0);)
+ GLSL(float h = permute(permute(permute(_m.x)+_m.y)+_m.z);)
+ gl_sc_uniform_dynamic(sc);
+ gl_sc_uniform_f(sc, "random", (double)av_lfg_get(lfg) / UINT32_MAX);
+}
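+
+// [Editor's note] Illustrative sketch, not part of mpv: scalar C equivalents
+// of the shader PRNG emitted above, useful for inspecting its distribution on
+// the CPU. The helper names are hypothetical.
+static float prng_mod289(float x) { return x - floorf(x * (1.0f/289.0f)) * 289.0f; }
+static float prng_permute(float x)
+{
+    float fr = x - floorf(x); // fract(x)
+    return prng_mod289(prng_mod289(34.0f*x + 1.0f) * (fr + 1.0f));
+}
+static float prng_rand(float x) { float v = x * (1.0f/41.0f); return v - floorf(v); }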
+
+const struct deband_opts deband_opts_def = {
+ .iterations = 1,
+ .threshold = 48.0,
+ .range = 16.0,
+ .grain = 32.0,
+};
+
+#define OPT_BASE_STRUCT struct deband_opts
+const struct m_sub_options deband_conf = {
+ .opts = (const m_option_t[]) {
+ {"iterations", OPT_INT(iterations), M_RANGE(0, 16)},
+ {"threshold", OPT_FLOAT(threshold), M_RANGE(0.0, 4096.0)},
+ {"range", OPT_FLOAT(range), M_RANGE(1.0, 64.0)},
+ {"grain", OPT_FLOAT(grain), M_RANGE(0.0, 4096.0)},
+ {0}
+ },
+ .size = sizeof(struct deband_opts),
+ .defaults = &deband_opts_def,
+};
+
+// Stochastically sample a debanded result from a hooked texture.
+void pass_sample_deband(struct gl_shader_cache *sc, struct deband_opts *opts,
+ AVLFG *lfg, enum mp_csp_trc trc)
+{
+ // Initialize the PRNG
+ GLSLF("{\n");
+ prng_init(sc, lfg);
+
+ // Helper: Compute a stochastic approximation of the avg color around a
+ // pixel
+ GLSLHF("vec4 average(float range, inout float h) {\n");
+    // Compute a random angle and distance
+ GLSLH(float dist = rand(h) * range; h = permute(h);)
+ GLSLH(float dir = rand(h) * 6.2831853; h = permute(h);)
+ GLSLH(vec2 o = dist * vec2(cos(dir), sin(dir));)
+
+ // Sample at quarter-turn intervals around the source pixel
+ GLSLH(vec4 ref[4];)
+ GLSLH(ref[0] = HOOKED_texOff(vec2( o.x, o.y));)
+ GLSLH(ref[1] = HOOKED_texOff(vec2(-o.y, o.x));)
+ GLSLH(ref[2] = HOOKED_texOff(vec2(-o.x, -o.y));)
+ GLSLH(ref[3] = HOOKED_texOff(vec2( o.y, -o.x));)
+
+ // Return the (normalized) average
+ GLSLH(return (ref[0] + ref[1] + ref[2] + ref[3])*0.25;)
+ GLSLHF("}\n");
+
+ // Sample the source pixel
+ GLSL(color = HOOKED_tex(HOOKED_pos);)
+ GLSLF("vec4 avg, diff;\n");
+ for (int i = 1; i <= opts->iterations; i++) {
+ // Sample the average pixel and use it instead of the original if
+ // the difference is below the given threshold
+ GLSLF("avg = average(%f, h);\n", i * opts->range);
+ GLSL(diff = abs(color - avg);)
+ GLSLF("color = mix(avg, color, %s(greaterThan(diff, vec4(%f))));\n",
+ gl_sc_bvec(sc, 4), opts->threshold / (i * 16384.0));
+ }
+
+ // Add some random noise to smooth out residual differences
+ GLSL(vec3 noise;)
+ GLSL(noise.x = rand(h); h = permute(h);)
+ GLSL(noise.y = rand(h); h = permute(h);)
+ GLSL(noise.z = rand(h); h = permute(h);)
+
+ // Noise is scaled to the signal level to prevent extreme noise for HDR
+ float gain = opts->grain/8192.0 / mp_trc_nom_peak(trc);
+ GLSLF("color.xyz += %f * (noise - vec3(0.5));\n", gain);
+ GLSLF("}\n");
+}
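+
+// [Editor's note] Illustrative sketch, not part of mpv: the per-iteration
+// range and threshold used by the loop above, with the raw option values
+// mapped to the shader's internal scale. The helper name is hypothetical.
+static void deband_iter_params(const struct deband_opts *opts, int i,
+                               float *avg_range, float *threshold)
+{
+    *avg_range = i * opts->range;                  // sampling radius grows per pass
+    *threshold = opts->threshold / (i * 16384.0f); // acceptance threshold shrinks
+}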
+
+// Assumes the texture was hooked
+void pass_sample_unsharp(struct gl_shader_cache *sc, float param) {
+ GLSLF("{\n");
+ GLSL(float st1 = 1.2;)
+ GLSL(vec4 p = HOOKED_tex(HOOKED_pos);)
+ GLSL(vec4 sum1 = HOOKED_texOff(st1 * vec2(+1, +1))
+ + HOOKED_texOff(st1 * vec2(+1, -1))
+ + HOOKED_texOff(st1 * vec2(-1, +1))
+ + HOOKED_texOff(st1 * vec2(-1, -1));)
+ GLSL(float st2 = 1.5;)
+ GLSL(vec4 sum2 = HOOKED_texOff(st2 * vec2(+1, 0))
+ + HOOKED_texOff(st2 * vec2( 0, +1))
+ + HOOKED_texOff(st2 * vec2(-1, 0))
+ + HOOKED_texOff(st2 * vec2( 0, -1));)
+ GLSL(vec4 t = p * 0.859375 + sum2 * -0.1171875 + sum1 * -0.09765625;)
+ GLSLF("color = p + t * %f;\n", param);
+ GLSLF("}\n");
+}
diff --git a/video/out/gpu/video_shaders.h b/video/out/gpu/video_shaders.h
new file mode 100644
index 0000000..27e7874
--- /dev/null
+++ b/video/out/gpu/video_shaders.h
@@ -0,0 +1,59 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MP_GL_VIDEO_SHADERS_H
+#define MP_GL_VIDEO_SHADERS_H
+
+#include <libavutil/lfg.h>
+
+#include "utils.h"
+#include "video.h"
+
+struct deband_opts {
+ int iterations;
+ float threshold;
+ float range;
+ float grain;
+};
+
+extern const struct deband_opts deband_opts_def;
+extern const struct m_sub_options deband_conf;
+
+void sampler_prelude(struct gl_shader_cache *sc, int tex_num);
+void pass_sample_separated_gen(struct gl_shader_cache *sc, struct scaler *scaler,
+ int d_x, int d_y);
+void pass_sample_polar(struct gl_shader_cache *sc, struct scaler *scaler,
+ int components, bool sup_gather);
+void pass_compute_polar(struct gl_shader_cache *sc, struct scaler *scaler,
+ int components, int bw, int bh, int iw, int ih);
+void pass_sample_bicubic_fast(struct gl_shader_cache *sc);
+void pass_sample_oversample(struct gl_shader_cache *sc, struct scaler *scaler,
+ int w, int h);
+
+void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc);
+void pass_delinearize(struct gl_shader_cache *sc, enum mp_csp_trc trc);
+
+void pass_color_map(struct gl_shader_cache *sc, bool is_linear,
+ struct mp_colorspace src, struct mp_colorspace dst,
+ const struct gl_tone_map_opts *opts);
+
+void pass_sample_deband(struct gl_shader_cache *sc, struct deband_opts *opts,
+ AVLFG *lfg, enum mp_csp_trc trc);
+
+void pass_sample_unsharp(struct gl_shader_cache *sc, float param);
+
+#endif
diff --git a/video/out/gpu_next/context.c b/video/out/gpu_next/context.c
new file mode 100644
index 0000000..2887cff
--- /dev/null
+++ b/video/out/gpu_next/context.c
@@ -0,0 +1,240 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <libplacebo/config.h>
+
+#ifdef PL_HAVE_D3D11
+#include <libplacebo/d3d11.h>
+#endif
+
+#ifdef PL_HAVE_OPENGL
+#include <libplacebo/opengl.h>
+#endif
+
+#include "context.h"
+#include "config.h"
+#include "common/common.h"
+#include "options/m_config.h"
+#include "video/out/placebo/utils.h"
+#include "video/out/gpu/video.h"
+
+#if HAVE_D3D11
+#include "osdep/windows_utils.h"
+#include "video/out/d3d11/ra_d3d11.h"
+#include "video/out/d3d11/context.h"
+#endif
+
+#if HAVE_GL
+#include "video/out/opengl/context.h"
+#include "video/out/opengl/ra_gl.h"
+# if HAVE_EGL
+#include <EGL/egl.h>
+# endif
+#endif
+
+#if HAVE_VULKAN
+#include "video/out/vulkan/context.h"
+#endif
+
+#if HAVE_D3D11
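+// Borrow the D3D11 device and swap chain from the already-created mpv ra_ctx
+// and wrap them in libplacebo objects (pl_d3d11 / pl_swapchain).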
+static bool d3d11_pl_init(struct vo *vo, struct gpu_ctx *ctx,
+ struct ra_ctx_opts *ctx_opts)
+{
+#if !defined(PL_HAVE_D3D11)
+ MP_MSG(ctx, vo->probing ? MSGL_V : MSGL_ERR,
+ "libplacebo was built without D3D11 support.\n");
+ return false;
+#else // defined(PL_HAVE_D3D11)
+ bool success = false;
+
+ ID3D11Device *device = ra_d3d11_get_device(ctx->ra_ctx->ra);
+ IDXGISwapChain *swapchain = ra_d3d11_ctx_get_swapchain(ctx->ra_ctx);
+ if (!device || !swapchain) {
+ mp_err(ctx->log,
+ "Failed to receive required components from the mpv d3d11 "
+ "context! (device: %s, swap chain: %s)\n",
+ device ? "OK" : "failed",
+ swapchain ? "OK" : "failed");
+ goto err_out;
+ }
+
+ pl_d3d11 d3d11 = pl_d3d11_create(ctx->pllog,
+ pl_d3d11_params(
+ .device = device,
+ )
+ );
+ if (!d3d11) {
+ mp_err(ctx->log, "Failed to acquire a d3d11 libplacebo context!\n");
+ goto err_out;
+ }
+ ctx->gpu = d3d11->gpu;
+
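+    // Device creation succeeded, so stop downgrading libplacebo errors to
+    // verbose (probing) level.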
+ mppl_log_set_probing(ctx->pllog, false);
+
+ ctx->swapchain = pl_d3d11_create_swapchain(d3d11,
+ pl_d3d11_swapchain_params(
+ .swapchain = swapchain,
+ )
+ );
+ if (!ctx->swapchain) {
+ mp_err(ctx->log, "Failed to acquire a d3d11 libplacebo swap chain!\n");
+ goto err_out;
+ }
+
+ success = true;
+
+err_out:
+ SAFE_RELEASE(swapchain);
+ SAFE_RELEASE(device);
+
+ return success;
+#endif // defined(PL_HAVE_D3D11)
+}
+#endif // HAVE_D3D11
+
+struct gpu_ctx *gpu_ctx_create(struct vo *vo, struct gl_video_opts *gl_opts)
+{
+ struct gpu_ctx *ctx = talloc_zero(NULL, struct gpu_ctx);
+ ctx->log = vo->log;
+
+ struct ra_ctx_opts *ctx_opts = mp_get_config_group(ctx, vo->global, &ra_ctx_conf);
+ ctx_opts->want_alpha = gl_opts->alpha_mode == ALPHA_YES;
+ ctx->ra_ctx = ra_ctx_create(vo, *ctx_opts);
+ if (!ctx->ra_ctx)
+ goto err_out;
+
+#if HAVE_VULKAN
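+    // The Vulkan ra_ctx already created its own pl_log, pl_gpu and
+    // pl_swapchain, so just reuse them.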
+ struct mpvk_ctx *vkctx = ra_vk_ctx_get(ctx->ra_ctx);
+ if (vkctx) {
+ ctx->pllog = vkctx->pllog;
+ ctx->gpu = vkctx->gpu;
+ ctx->swapchain = vkctx->swapchain;
+ return ctx;
+ }
+#endif
+
+ ctx->pllog = mppl_log_create(ctx, ctx->log);
+ if (!ctx->pllog)
+ goto err_out;
+
+ mppl_log_set_probing(ctx->pllog, vo->probing);
+
+#if HAVE_D3D11
+ if (ra_is_d3d11(ctx->ra_ctx->ra)) {
+ if (!d3d11_pl_init(vo, ctx, ctx_opts))
+ goto err_out;
+
+ return ctx;
+ }
+#endif
+
+#if HAVE_GL && defined(PL_HAVE_OPENGL)
+ if (ra_is_gl(ctx->ra_ctx->ra)) {
+ struct GL *gl = ra_gl_get(ctx->ra_ctx->ra);
+ pl_opengl opengl = pl_opengl_create(ctx->pllog,
+ pl_opengl_params(
+ .debug = ctx_opts->debug,
+ .allow_software = ctx_opts->allow_sw,
+ .get_proc_addr_ex = (void *) gl->get_fn,
+ .proc_ctx = gl->fn_ctx,
+# if HAVE_EGL
+ .egl_display = eglGetCurrentDisplay(),
+ .egl_context = eglGetCurrentContext(),
+# endif
+ )
+ );
+ if (!opengl)
+ goto err_out;
+ ctx->gpu = opengl->gpu;
+
+ mppl_log_set_probing(ctx->pllog, false);
+
+ ctx->swapchain = pl_opengl_create_swapchain(opengl, pl_opengl_swapchain_params(
+ .max_swapchain_depth = vo->opts->swapchain_depth,
+ .framebuffer.flipped = gl->flipped,
+ ));
+ if (!ctx->swapchain)
+ goto err_out;
+
+ return ctx;
+ }
+#elif HAVE_GL
+ if (ra_is_gl(ctx->ra_ctx->ra)) {
+ MP_MSG(ctx, vo->probing ? MSGL_V : MSGL_ERR,
+ "libplacebo was built without OpenGL support.\n");
+ }
+#endif
+
+err_out:
+ gpu_ctx_destroy(&ctx);
+ return NULL;
+}
+
+bool gpu_ctx_resize(struct gpu_ctx *ctx, int w, int h)
+{
+#if HAVE_VULKAN
+ if (ra_vk_ctx_get(ctx->ra_ctx))
+        // the Vulkan RA context resizes its swap chain by itself
+ return true;
+#endif
+
+ return pl_swapchain_resize(ctx->swapchain, &w, &h);
+}
+
+void gpu_ctx_destroy(struct gpu_ctx **ctxp)
+{
+ struct gpu_ctx *ctx = *ctxp;
+ if (!ctx)
+ return;
+ if (!ctx->ra_ctx)
+ goto skip_common_pl_cleanup;
+
+#if HAVE_VULKAN
+ if (ra_vk_ctx_get(ctx->ra_ctx))
+        // the Vulkan RA context handles libplacebo cleanup by itself;
+        // skip the common local cleanup.
+ goto skip_common_pl_cleanup;
+#endif
+
+ if (ctx->swapchain)
+ pl_swapchain_destroy(&ctx->swapchain);
+
+ if (ctx->gpu) {
+#if HAVE_GL && defined(PL_HAVE_OPENGL)
+ if (ra_is_gl(ctx->ra_ctx->ra)) {
+ pl_opengl opengl = pl_opengl_get(ctx->gpu);
+ pl_opengl_destroy(&opengl);
+ }
+#endif
+
+#if HAVE_D3D11 && defined(PL_HAVE_D3D11)
+ if (ra_is_d3d11(ctx->ra_ctx->ra)) {
+ pl_d3d11 d3d11 = pl_d3d11_get(ctx->gpu);
+ pl_d3d11_destroy(&d3d11);
+ }
+#endif
+ }
+
+ if (ctx->pllog)
+ pl_log_destroy(&ctx->pllog);
+
+skip_common_pl_cleanup:
+ ra_ctx_destroy(&ctx->ra_ctx);
+
+ talloc_free(ctx);
+ *ctxp = NULL;
+}
diff --git a/video/out/gpu_next/context.h b/video/out/gpu_next/context.h
new file mode 100644
index 0000000..b98b9e7
--- /dev/null
+++ b/video/out/gpu_next/context.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <libplacebo/renderer.h>
+
+struct mp_log;
+struct ra_ctx;
+struct vo;
+struct gl_video_opts;
+
+struct gpu_ctx {
+ struct mp_log *log;
+ struct ra_ctx *ra_ctx;
+
+ pl_log pllog;
+ pl_gpu gpu;
+ pl_swapchain swapchain;
+
+ void *priv;
+};
+
+struct gpu_ctx *gpu_ctx_create(struct vo *vo, struct gl_video_opts *gl_opts);
+bool gpu_ctx_resize(struct gpu_ctx *ctx, int w, int h);
+void gpu_ctx_destroy(struct gpu_ctx **ctxp);
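
The three functions declared above cover the whole lifetime of a gpu_ctx. As a
rough sketch of how a caller such as vo_gpu_next might drive them, the
'struct priv' wrapper and the preinit/resize/uninit callbacks below are
illustrative assumptions and are not part of this diff:

    // Hypothetical caller, for illustration only.
    struct priv {
        struct gpu_ctx *context;
    };

    static int preinit(struct vo *vo)
    {
        struct priv *p = vo->priv;
        struct gl_video_opts *opts =
            mp_get_config_group(p, vo->global, &gl_video_conf);
        p->context = gpu_ctx_create(vo, opts);
        if (!p->context)
            return -1;              // probing failed; let another VO take over
        return 0;
    }

    static void resize(struct vo *vo)
    {
        struct priv *p = vo->priv;
        // Vulkan contexts resize their swap chain internally; the others go
        // through pl_swapchain_resize().
        gpu_ctx_resize(p->context, vo->dwidth, vo->dheight);
    }

    static void uninit(struct vo *vo)
    {
        struct priv *p = vo->priv;
        gpu_ctx_destroy(&p->context);   // also tears down the underlying ra_ctx
    }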