diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-15 20:36:56 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-15 20:36:56 +0000 |
commit | 51de1d8436100f725f3576aefa24a2bd2057bc28 (patch) | |
tree | c6d1d5264b6d40a8d7ca34129f36b7d61e188af3 /video/out/placebo | |
parent | Initial commit. (diff) | |
download | mpv-51de1d8436100f725f3576aefa24a2bd2057bc28.tar.xz mpv-51de1d8436100f725f3576aefa24a2bd2057bc28.zip |
Adding upstream version 0.37.0 (tag: upstream/0.37.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'video/out/placebo')
-rw-r--r-- | video/out/placebo/ra_pl.c | 677 | ||||
-rw-r--r-- | video/out/placebo/ra_pl.h | 16 | ||||
-rw-r--r-- | video/out/placebo/utils.c | 263 | ||||
-rw-r--r-- | video/out/placebo/utils.h | 41 |
4 files changed, 997 insertions, 0 deletions
diff --git a/video/out/placebo/ra_pl.c b/video/out/placebo/ra_pl.c new file mode 100644 index 0000000..6259651 --- /dev/null +++ b/video/out/placebo/ra_pl.c @@ -0,0 +1,677 @@ +#include "common/common.h" +#include "common/msg.h" + +#include "ra_pl.h" +#include "utils.h" + +struct ra_pl { + pl_gpu gpu; + struct ra_timer_pl *active_timer; +}; + +static inline pl_gpu get_gpu(const struct ra *ra) +{ + struct ra_pl *p = ra->priv; + return p->gpu; +} + +static struct ra_fns ra_fns_pl; + +pl_gpu ra_pl_get(const struct ra *ra) +{ + return ra->fns == &ra_fns_pl ? get_gpu(ra) : NULL; +} + +static pl_timer get_active_timer(const struct ra *ra); + +struct ra *ra_create_pl(pl_gpu gpu, struct mp_log *log) +{ + assert(gpu); + + struct ra *ra = talloc_zero(NULL, struct ra); + ra->log = log; + ra->fns = &ra_fns_pl; + + struct ra_pl *p = ra->priv = talloc_zero(ra, struct ra_pl); + p->gpu = gpu; + + ra->glsl_version = gpu->glsl.version; + ra->glsl_vulkan = gpu->glsl.vulkan; + ra->glsl_es = gpu->glsl.gles; + + ra->caps = RA_CAP_DIRECT_UPLOAD | RA_CAP_NESTED_ARRAY | RA_CAP_FRAGCOORD; + + if (gpu->glsl.compute) + ra->caps |= RA_CAP_COMPUTE | RA_CAP_NUM_GROUPS; + if (gpu->limits.compute_queues > gpu->limits.fragment_queues) + ra->caps |= RA_CAP_PARALLEL_COMPUTE; + if (gpu->limits.max_variable_comps) + ra->caps |= RA_CAP_GLOBAL_UNIFORM; + if (!gpu->limits.host_cached) + ra->caps |= RA_CAP_SLOW_DR; + + if (gpu->limits.max_tex_1d_dim) + ra->caps |= RA_CAP_TEX_1D; + if (gpu->limits.max_tex_3d_dim) + ra->caps |= RA_CAP_TEX_3D; + if (gpu->limits.max_ubo_size) + ra->caps |= RA_CAP_BUF_RO; + if (gpu->limits.max_ssbo_size) + ra->caps |= RA_CAP_BUF_RW; + if (gpu->glsl.min_gather_offset && gpu->glsl.max_gather_offset) + ra->caps |= RA_CAP_GATHER; + + // Semi-hack: assume all textures are blittable if r8 is + pl_fmt r8 = pl_find_named_fmt(gpu, "r8"); + if (r8->caps & PL_FMT_CAP_BLITTABLE) + ra->caps |= RA_CAP_BLIT; + + ra->max_texture_wh = gpu->limits.max_tex_2d_dim; + ra->max_pushc_size = 
gpu->limits.max_pushc_size; + ra->max_compute_group_threads = gpu->glsl.max_group_threads; + ra->max_shmem = gpu->glsl.max_shmem_size; + + // Set up format wrappers + for (int i = 0; i < gpu->num_formats; i++) { + pl_fmt plfmt = gpu->formats[i]; + static const enum ra_ctype fmt_type_map[PL_FMT_TYPE_COUNT] = { + [PL_FMT_UNORM] = RA_CTYPE_UNORM, + [PL_FMT_UINT] = RA_CTYPE_UINT, + [PL_FMT_FLOAT] = RA_CTYPE_FLOAT, + }; + + enum ra_ctype type = fmt_type_map[plfmt->type]; + if (!type || !(plfmt->caps & PL_FMT_CAP_SAMPLEABLE)) + continue; + + struct ra_format *rafmt = talloc_zero(ra, struct ra_format); + *rafmt = (struct ra_format) { + .name = plfmt->name, + .priv = (void *) plfmt, + .ctype = type, + .ordered = pl_fmt_is_ordered(plfmt), + .num_components = plfmt->num_components, + .pixel_size = plfmt->texel_size, + .linear_filter = plfmt->caps & PL_FMT_CAP_LINEAR, + .renderable = plfmt->caps & PL_FMT_CAP_RENDERABLE, + .storable = plfmt->caps & PL_FMT_CAP_STORABLE, + .glsl_format = plfmt->glsl_format, + }; + + for (int c = 0; c < plfmt->num_components; c++) { + rafmt->component_size[c] = plfmt->host_bits[c]; + rafmt->component_depth[c] = plfmt->component_depth[c]; + } + + MP_TARRAY_APPEND(ra, ra->formats, ra->num_formats, rafmt); + } + + return ra; +} + +static void destroy_ra_pl(struct ra *ra) +{ + talloc_free(ra); +} + +static struct ra_format *map_fmt(struct ra *ra, pl_fmt plfmt) +{ + for (int i = 0; i < ra->num_formats; i++) { + if (ra->formats[i]->priv == plfmt) + return ra->formats[i]; + } + + MP_ERR(ra, "Failed mapping pl_fmt '%s' to ra_fmt?\n", plfmt->name); + return NULL; +} + +bool mppl_wrap_tex(struct ra *ra, pl_tex pltex, struct ra_tex *out_tex) +{ + if (!pltex) + return false; + + *out_tex = (struct ra_tex) { + .params = { + .dimensions = pl_tex_params_dimension(pltex->params), + .w = pltex->params.w, + .h = pltex->params.h, + .d = pltex->params.d, + .format = map_fmt(ra, pltex->params.format), + .render_src = pltex->params.sampleable, + .render_dst = 
pltex->params.renderable, + .storage_dst = pltex->params.storable, + .blit_src = pltex->params.blit_src, + .blit_dst = pltex->params.blit_dst, + .host_mutable = pltex->params.host_writable, + .downloadable = pltex->params.host_readable, + // These don't exist upstream, so just pick something reasonable + .src_linear = pltex->params.format->caps & PL_FMT_CAP_LINEAR, + .src_repeat = false, + }, + .priv = (void *) pltex, + }; + + return !!out_tex->params.format; +} + +static struct ra_tex *tex_create_pl(struct ra *ra, + const struct ra_tex_params *params) +{ + pl_gpu gpu = get_gpu(ra); + pl_tex pltex = pl_tex_create(gpu, &(struct pl_tex_params) { + .w = params->w, + .h = params->dimensions >= 2 ? params->h : 0, + .d = params->dimensions >= 3 ? params->d : 0, + .format = params->format->priv, + .sampleable = params->render_src, + .renderable = params->render_dst, + .storable = params->storage_dst, + .blit_src = params->blit_src, + .blit_dst = params->blit_dst || params->render_dst, + .host_writable = params->host_mutable, + .host_readable = params->downloadable, + .initial_data = params->initial_data, + }); + + struct ra_tex *ratex = talloc_ptrtype(NULL, ratex); + if (!mppl_wrap_tex(ra, pltex, ratex)) { + pl_tex_destroy(gpu, &pltex); + talloc_free(ratex); + return NULL; + } + + // Keep track of these, so we can correctly bind them later + ratex->params.src_repeat = params->src_repeat; + ratex->params.src_linear = params->src_linear; + + return ratex; +} + +static void tex_destroy_pl(struct ra *ra, struct ra_tex *tex) +{ + if (!tex) + return; + + pl_tex_destroy(get_gpu(ra), (pl_tex *) &tex->priv); + talloc_free(tex); +} + +static bool tex_upload_pl(struct ra *ra, const struct ra_tex_upload_params *params) +{ + pl_gpu gpu = get_gpu(ra); + pl_tex tex = params->tex->priv; + struct pl_tex_transfer_params pl_params = { + .tex = tex, + .buf = params->buf ? 
params->buf->priv : NULL, + .buf_offset = params->buf_offset, + .ptr = (void *) params->src, + .timer = get_active_timer(ra), + }; + + pl_buf staging = NULL; + if (params->tex->params.dimensions == 2) { + if (params->rc) { + pl_params.rc = (struct pl_rect3d) { + .x0 = params->rc->x0, .x1 = params->rc->x1, + .y0 = params->rc->y0, .y1 = params->rc->y1, + }; + } + + pl_params.row_pitch = params->stride; + } + + bool ok = pl_tex_upload(gpu, &pl_params); + pl_buf_destroy(gpu, &staging); + return ok; +} + +static bool tex_download_pl(struct ra *ra, struct ra_tex_download_params *params) +{ + pl_tex tex = params->tex->priv; + struct pl_tex_transfer_params pl_params = { + .tex = tex, + .ptr = params->dst, + .timer = get_active_timer(ra), + .row_pitch = params->stride, + }; + + return pl_tex_download(get_gpu(ra), &pl_params); +} + +static struct ra_buf *buf_create_pl(struct ra *ra, + const struct ra_buf_params *params) +{ + pl_buf plbuf = pl_buf_create(get_gpu(ra), &(struct pl_buf_params) { + .size = params->size, + .uniform = params->type == RA_BUF_TYPE_UNIFORM, + .storable = params->type == RA_BUF_TYPE_SHADER_STORAGE, + .host_mapped = params->host_mapped, + .host_writable = params->host_mutable, + .initial_data = params->initial_data, + }); + + if (!plbuf) + return NULL; + + struct ra_buf *rabuf = talloc_ptrtype(NULL, rabuf); + *rabuf = (struct ra_buf) { + .params = *params, + .data = plbuf->data, + .priv = (void *) plbuf, + }; + + rabuf->params.initial_data = NULL; + return rabuf; +} + +static void buf_destroy_pl(struct ra *ra, struct ra_buf *buf) +{ + if (!buf) + return; + + pl_buf_destroy(get_gpu(ra), (pl_buf *) &buf->priv); + talloc_free(buf); +} + +static void buf_update_pl(struct ra *ra, struct ra_buf *buf, ptrdiff_t offset, + const void *data, size_t size) +{ + pl_buf_write(get_gpu(ra), buf->priv, offset, data, size); +} + +static bool buf_poll_pl(struct ra *ra, struct ra_buf *buf) +{ + return !pl_buf_poll(get_gpu(ra), buf->priv, 0); +} + +static void 
clear_pl(struct ra *ra, struct ra_tex *dst, float color[4], + struct mp_rect *scissor) +{ + // TODO: implement scissor clearing by bltting a 1x1 tex instead + pl_tex_clear(get_gpu(ra), dst->priv, color); +} + +static void blit_pl(struct ra *ra, struct ra_tex *dst, struct ra_tex *src, + struct mp_rect *dst_rc, struct mp_rect *src_rc) +{ + struct pl_rect3d plsrc = {0}, pldst = {0}; + if (src_rc) { + plsrc.x0 = MPMIN(MPMAX(src_rc->x0, 0), src->params.w); + plsrc.y0 = MPMIN(MPMAX(src_rc->y0, 0), src->params.h); + plsrc.x1 = MPMIN(MPMAX(src_rc->x1, 0), src->params.w); + plsrc.y1 = MPMIN(MPMAX(src_rc->y1, 0), src->params.h); + } + + if (dst_rc) { + pldst.x0 = MPMIN(MPMAX(dst_rc->x0, 0), dst->params.w); + pldst.y0 = MPMIN(MPMAX(dst_rc->y0, 0), dst->params.h); + pldst.x1 = MPMIN(MPMAX(dst_rc->x1, 0), dst->params.w); + pldst.y1 = MPMIN(MPMAX(dst_rc->y1, 0), dst->params.h); + } + + pl_tex_blit(get_gpu(ra), &(struct pl_tex_blit_params) { + .src = src->priv, + .dst = dst->priv, + .src_rc = plsrc, + .dst_rc = pldst, + .sample_mode = src->params.src_linear ? 
PL_TEX_SAMPLE_LINEAR + : PL_TEX_SAMPLE_NEAREST, + }); +} + +static const enum pl_var_type var_type[RA_VARTYPE_COUNT] = { + [RA_VARTYPE_INT] = PL_VAR_SINT, + [RA_VARTYPE_FLOAT] = PL_VAR_FLOAT, +}; + +static const enum pl_desc_type desc_type[RA_VARTYPE_COUNT] = { + [RA_VARTYPE_TEX] = PL_DESC_SAMPLED_TEX, + [RA_VARTYPE_IMG_W] = PL_DESC_STORAGE_IMG, + [RA_VARTYPE_BUF_RO] = PL_DESC_BUF_UNIFORM, + [RA_VARTYPE_BUF_RW] = PL_DESC_BUF_STORAGE, +}; + +static const enum pl_fmt_type fmt_type[RA_VARTYPE_COUNT] = { + [RA_VARTYPE_INT] = PL_FMT_SINT, + [RA_VARTYPE_FLOAT] = PL_FMT_FLOAT, + [RA_VARTYPE_BYTE_UNORM] = PL_FMT_UNORM, +}; + +static const size_t var_size[RA_VARTYPE_COUNT] = { + [RA_VARTYPE_INT] = sizeof(int), + [RA_VARTYPE_FLOAT] = sizeof(float), + [RA_VARTYPE_BYTE_UNORM] = sizeof(uint8_t), +}; + +static struct ra_layout uniform_layout_pl(struct ra_renderpass_input *inp) +{ + // To get the alignment requirements, we try laying this out with + // an offset of 1 and then see where it ends up. This will always be + // the minimum alignment requirement. 
+ struct pl_var_layout layout = pl_buf_uniform_layout(1, &(struct pl_var) { + .name = inp->name, + .type = var_type[inp->type], + .dim_v = inp->dim_v, + .dim_m = inp->dim_m, + .dim_a = 1, + }); + + return (struct ra_layout) { + .align = layout.offset, + .stride = layout.stride, + .size = layout.size, + }; +} + +static struct ra_layout push_constant_layout_pl(struct ra_renderpass_input *inp) +{ + struct pl_var_layout layout = pl_push_constant_layout(1, &(struct pl_var) { + .name = inp->name, + .type = var_type[inp->type], + .dim_v = inp->dim_v, + .dim_m = inp->dim_m, + .dim_a = 1, + }); + + return (struct ra_layout) { + .align = layout.offset, + .stride = layout.stride, + .size = layout.size, + }; +} + +static int desc_namespace_pl(struct ra *ra, enum ra_vartype type) +{ + return pl_desc_namespace(get_gpu(ra), desc_type[type]); +} + +struct pass_priv { + pl_pass pass; + uint16_t *inp_index; // index translation map + // Space to hold the descriptor bindings and variable updates + struct pl_desc_binding *binds; + struct pl_var_update *varups; + int num_varups; +}; + +static struct ra_renderpass *renderpass_create_pl(struct ra *ra, + const struct ra_renderpass_params *params) +{ + void *tmp = talloc_new(NULL); + pl_gpu gpu = get_gpu(ra); + struct ra_renderpass *pass = NULL; + + static const enum pl_pass_type pass_type[] = { + [RA_RENDERPASS_TYPE_RASTER] = PL_PASS_RASTER, + [RA_RENDERPASS_TYPE_COMPUTE] = PL_PASS_COMPUTE, + }; + + struct pl_var *vars = NULL; + struct pl_desc *descs = NULL; + int num_vars = 0, num_descs = 0; + + struct pass_priv *priv = talloc_ptrtype(tmp, priv); + priv->inp_index = talloc_zero_array(priv, uint16_t, params->num_inputs); + + for (int i = 0; i < params->num_inputs; i++) { + const struct ra_renderpass_input *inp = ¶ms->inputs[i]; + if (var_type[inp->type]) { + priv->inp_index[i] = num_vars; + MP_TARRAY_APPEND(tmp, vars, num_vars, (struct pl_var) { + .name = inp->name, + .type = var_type[inp->type], + .dim_v = inp->dim_v, + .dim_m = 
inp->dim_m, + .dim_a = 1, + }); + } else if (desc_type[inp->type]) { + priv->inp_index[i] = num_descs; + MP_TARRAY_APPEND(tmp, descs, num_descs, (struct pl_desc) { + .name = inp->name, + .type = desc_type[inp->type], + .binding = inp->binding, + .access = inp->type == RA_VARTYPE_IMG_W ? PL_DESC_ACCESS_WRITEONLY + : inp->type == RA_VARTYPE_BUF_RW ? PL_DESC_ACCESS_READWRITE + : PL_DESC_ACCESS_READONLY, + }); + } + } + + // Allocate space to store the bindings map persistently + priv->binds = talloc_zero_array(priv, struct pl_desc_binding, num_descs); + + struct pl_pass_params pl_params = { + .type = pass_type[params->type], + .variables = vars, + .num_variables = num_vars, + .descriptors = descs, + .num_descriptors = num_descs, + .push_constants_size = params->push_constants_size, + .glsl_shader = params->type == RA_RENDERPASS_TYPE_COMPUTE + ? params->compute_shader + : params->frag_shader, + }; + + struct pl_blend_params blend_params; + + if (params->type == RA_RENDERPASS_TYPE_RASTER) { + pl_params.vertex_shader = params->vertex_shader; + pl_params.vertex_type = PL_PRIM_TRIANGLE_LIST; + pl_params.vertex_stride = params->vertex_stride; + pl_params.load_target = !params->invalidate_target; + pl_params.target_format = params->target_format->priv; + + if (params->enable_blend) { + pl_params.blend_params = &blend_params; + blend_params = (struct pl_blend_params) { + // Same enum order as ra_blend + .src_rgb = (enum pl_blend_mode) params->blend_src_rgb, + .dst_rgb = (enum pl_blend_mode) params->blend_dst_rgb, + .src_alpha = (enum pl_blend_mode) params->blend_src_alpha, + .dst_alpha = (enum pl_blend_mode) params->blend_dst_alpha, + }; + } + + for (int i = 0; i < params->num_vertex_attribs; i++) { + const struct ra_renderpass_input *inp = ¶ms->vertex_attribs[i]; + struct pl_vertex_attrib attrib = { + .name = inp->name, + .offset = inp->offset, + .location = i, + .fmt = pl_find_fmt(gpu, fmt_type[inp->type], inp->dim_v, 0, + var_size[inp->type] * 8, PL_FMT_CAP_VERTEX), + }; + 
+ if (!attrib.fmt) { + MP_ERR(ra, "Failed mapping vertex attrib '%s' to pl_fmt?\n", + inp->name); + goto error; + } + + MP_TARRAY_APPEND(tmp, pl_params.vertex_attribs, + pl_params.num_vertex_attribs, attrib); + } + } + + priv->pass = pl_pass_create(gpu, &pl_params); + if (!priv->pass) + goto error; + + pass = talloc_ptrtype(NULL, pass); + *pass = (struct ra_renderpass) { + .params = *ra_renderpass_params_copy(pass, params), + .priv = talloc_steal(pass, priv), + }; + + // fall through +error: + talloc_free(tmp); + return pass; +} + +static void renderpass_destroy_pl(struct ra *ra, struct ra_renderpass *pass) +{ + if (!pass) + return; + + struct pass_priv *priv = pass->priv; + pl_pass_destroy(get_gpu(ra), (pl_pass *) &priv->pass); + talloc_free(pass); +} + +static void renderpass_run_pl(struct ra *ra, + const struct ra_renderpass_run_params *params) +{ + struct pass_priv *p = params->pass->priv; + p->num_varups = 0; + + for (int i = 0; i < params->num_values; i++) { + const struct ra_renderpass_input_val *val = ¶ms->values[i]; + const struct ra_renderpass_input *inp = ¶ms->pass->params.inputs[i]; + if (var_type[inp->type]) { + MP_TARRAY_APPEND(p, p->varups, p->num_varups, (struct pl_var_update) { + .index = p->inp_index[val->index], + .data = val->data, + }); + } else { + struct pl_desc_binding bind; + switch (inp->type) { + case RA_VARTYPE_TEX: + case RA_VARTYPE_IMG_W: { + struct ra_tex *tex = *((struct ra_tex **) val->data); + bind.object = tex->priv; + bind.sample_mode = tex->params.src_linear ? PL_TEX_SAMPLE_LINEAR + : PL_TEX_SAMPLE_NEAREST; + bind.address_mode = tex->params.src_repeat ? 
PL_TEX_ADDRESS_REPEAT + : PL_TEX_ADDRESS_CLAMP; + break; + } + case RA_VARTYPE_BUF_RO: + case RA_VARTYPE_BUF_RW: + bind.object = (* (struct ra_buf **) val->data)->priv; + break; + default: MP_ASSERT_UNREACHABLE(); + }; + + p->binds[p->inp_index[val->index]] = bind; + }; + } + + struct pl_pass_run_params pl_params = { + .pass = p->pass, + .var_updates = p->varups, + .num_var_updates = p->num_varups, + .desc_bindings = p->binds, + .push_constants = params->push_constants, + .timer = get_active_timer(ra), + }; + + if (p->pass->params.type == PL_PASS_RASTER) { + pl_params.target = params->target->priv; + pl_params.viewport = mp_rect2d_to_pl(params->viewport); + pl_params.scissors = mp_rect2d_to_pl(params->scissors); + pl_params.vertex_data = params->vertex_data; + pl_params.vertex_count = params->vertex_count; + } else { + for (int i = 0; i < MP_ARRAY_SIZE(pl_params.compute_groups); i++) + pl_params.compute_groups[i] = params->compute_groups[i]; + } + + pl_pass_run(get_gpu(ra), &pl_params); +} + +struct ra_timer_pl { + // Because libpplacebo only supports one operation per timer, we need + // to use multiple pl_timers to sum up multiple passes/transfers + pl_timer *timers; + int num_timers; + int idx_timers; +}; + +static ra_timer *timer_create_pl(struct ra *ra) +{ + struct ra_timer_pl *t = talloc_zero(ra, struct ra_timer_pl); + return t; +} + +static void timer_destroy_pl(struct ra *ra, ra_timer *timer) +{ + pl_gpu gpu = get_gpu(ra); + struct ra_timer_pl *t = timer; + + for (int i = 0; i < t->num_timers; i++) + pl_timer_destroy(gpu, &t->timers[i]); + + talloc_free(t); +} + +static void timer_start_pl(struct ra *ra, ra_timer *timer) +{ + struct ra_pl *p = ra->priv; + struct ra_timer_pl *t = timer; + + // There's nothing easy we can do in this case, since libplacebo only + // supports one timer object per operation; so just ignore "inner" timers + // when the user is nesting different timer queries + if (p->active_timer) + return; + + p->active_timer = t; + 
t->idx_timers = 0; +} + +static uint64_t timer_stop_pl(struct ra *ra, ra_timer *timer) +{ + struct ra_pl *p = ra->priv; + struct ra_timer_pl *t = timer; + + if (p->active_timer != t) + return 0; + + p->active_timer = NULL; + + // Sum up all of the active results + uint64_t res = 0; + for (int i = 0; i < t->idx_timers; i++) + res += pl_timer_query(p->gpu, t->timers[i]); + + return res; +} + +static pl_timer get_active_timer(const struct ra *ra) +{ + struct ra_pl *p = ra->priv; + if (!p->active_timer) + return NULL; + + struct ra_timer_pl *t = p->active_timer; + if (t->idx_timers == t->num_timers) + MP_TARRAY_APPEND(t, t->timers, t->num_timers, pl_timer_create(p->gpu)); + + return t->timers[t->idx_timers++]; +} + +static struct ra_fns ra_fns_pl = { + .destroy = destroy_ra_pl, + .tex_create = tex_create_pl, + .tex_destroy = tex_destroy_pl, + .tex_upload = tex_upload_pl, + .tex_download = tex_download_pl, + .buf_create = buf_create_pl, + .buf_destroy = buf_destroy_pl, + .buf_update = buf_update_pl, + .buf_poll = buf_poll_pl, + .clear = clear_pl, + .blit = blit_pl, + .uniform_layout = uniform_layout_pl, + .push_constant_layout = push_constant_layout_pl, + .desc_namespace = desc_namespace_pl, + .renderpass_create = renderpass_create_pl, + .renderpass_destroy = renderpass_destroy_pl, + .renderpass_run = renderpass_run_pl, + .timer_create = timer_create_pl, + .timer_destroy = timer_destroy_pl, + .timer_start = timer_start_pl, + .timer_stop = timer_stop_pl, +}; + diff --git a/video/out/placebo/ra_pl.h b/video/out/placebo/ra_pl.h new file mode 100644 index 0000000..1290c9c --- /dev/null +++ b/video/out/placebo/ra_pl.h @@ -0,0 +1,16 @@ +#pragma once + +#include "video/out/gpu/ra.h" +#include <libplacebo/gpu.h> + +struct ra *ra_create_pl(pl_gpu gpu, struct mp_log *log); + +pl_gpu ra_pl_get(const struct ra *ra); + +static inline pl_fmt ra_pl_fmt_get(const struct ra_format *format) +{ + return format->priv; +} + +// Wrap a pl_tex into a ra_tex struct, returns if successful +bool 
mppl_wrap_tex(struct ra *ra, pl_tex pltex, struct ra_tex *out_tex); diff --git a/video/out/placebo/utils.c b/video/out/placebo/utils.c new file mode 100644 index 0000000..1209b72 --- /dev/null +++ b/video/out/placebo/utils.c @@ -0,0 +1,263 @@ +#include "common/common.h" +#include "utils.h" + +#include <libplacebo/utils/dolbyvision.h> + +static const int pl_log_to_msg_lev[PL_LOG_ALL+1] = { + [PL_LOG_FATAL] = MSGL_FATAL, + [PL_LOG_ERR] = MSGL_ERR, + [PL_LOG_WARN] = MSGL_WARN, + [PL_LOG_INFO] = MSGL_V, + [PL_LOG_DEBUG] = MSGL_DEBUG, + [PL_LOG_TRACE] = MSGL_TRACE, +}; + +static const enum pl_log_level msg_lev_to_pl_log[MSGL_MAX+1] = { + [MSGL_FATAL] = PL_LOG_FATAL, + [MSGL_ERR] = PL_LOG_ERR, + [MSGL_WARN] = PL_LOG_WARN, + [MSGL_INFO] = PL_LOG_WARN, + [MSGL_STATUS] = PL_LOG_WARN, + [MSGL_V] = PL_LOG_INFO, + [MSGL_DEBUG] = PL_LOG_DEBUG, + [MSGL_TRACE] = PL_LOG_TRACE, + [MSGL_MAX] = PL_LOG_ALL, +}; + +// translates log levels while probing +static const enum pl_log_level probing_map(enum pl_log_level level) +{ + switch (level) { + case PL_LOG_FATAL: + case PL_LOG_ERR: + case PL_LOG_WARN: + return PL_LOG_INFO; + + default: + return level; + } +} + +static void log_cb(void *priv, enum pl_log_level level, const char *msg) +{ + struct mp_log *log = priv; + mp_msg(log, pl_log_to_msg_lev[level], "%s\n", msg); +} + +static void log_cb_probing(void *priv, enum pl_log_level level, const char *msg) +{ + struct mp_log *log = priv; + mp_msg(log, pl_log_to_msg_lev[probing_map(level)], "%s\n", msg); +} + +pl_log mppl_log_create(void *tactx, struct mp_log *log) +{ + return pl_log_create(PL_API_VER, &(struct pl_log_params) { + .log_cb = log_cb, + .log_level = msg_lev_to_pl_log[mp_msg_level(log)], + .log_priv = mp_log_new(tactx, log, "libplacebo"), + }); +} + +void mppl_log_set_probing(pl_log log, bool probing) +{ + struct pl_log_params params = log->params; + params.log_cb = probing ? 
log_cb_probing : log_cb; + pl_log_update(log, ¶ms); +} + +enum pl_color_primaries mp_prim_to_pl(enum mp_csp_prim prim) +{ + switch (prim) { + case MP_CSP_PRIM_AUTO: return PL_COLOR_PRIM_UNKNOWN; + case MP_CSP_PRIM_BT_601_525: return PL_COLOR_PRIM_BT_601_525; + case MP_CSP_PRIM_BT_601_625: return PL_COLOR_PRIM_BT_601_625; + case MP_CSP_PRIM_BT_709: return PL_COLOR_PRIM_BT_709; + case MP_CSP_PRIM_BT_2020: return PL_COLOR_PRIM_BT_2020; + case MP_CSP_PRIM_BT_470M: return PL_COLOR_PRIM_BT_470M; + case MP_CSP_PRIM_APPLE: return PL_COLOR_PRIM_APPLE; + case MP_CSP_PRIM_ADOBE: return PL_COLOR_PRIM_ADOBE; + case MP_CSP_PRIM_PRO_PHOTO: return PL_COLOR_PRIM_PRO_PHOTO; + case MP_CSP_PRIM_CIE_1931: return PL_COLOR_PRIM_CIE_1931; + case MP_CSP_PRIM_DCI_P3: return PL_COLOR_PRIM_DCI_P3; + case MP_CSP_PRIM_DISPLAY_P3: return PL_COLOR_PRIM_DISPLAY_P3; + case MP_CSP_PRIM_V_GAMUT: return PL_COLOR_PRIM_V_GAMUT; + case MP_CSP_PRIM_S_GAMUT: return PL_COLOR_PRIM_S_GAMUT; + case MP_CSP_PRIM_EBU_3213: return PL_COLOR_PRIM_EBU_3213; + case MP_CSP_PRIM_FILM_C: return PL_COLOR_PRIM_FILM_C; + case MP_CSP_PRIM_ACES_AP0: return PL_COLOR_PRIM_ACES_AP0; + case MP_CSP_PRIM_ACES_AP1: return PL_COLOR_PRIM_ACES_AP1; + case MP_CSP_PRIM_COUNT: return PL_COLOR_PRIM_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum mp_csp_prim mp_prim_from_pl(enum pl_color_primaries prim) +{ + switch (prim){ + case PL_COLOR_PRIM_UNKNOWN: return MP_CSP_PRIM_AUTO; + case PL_COLOR_PRIM_BT_601_525: return MP_CSP_PRIM_BT_601_525; + case PL_COLOR_PRIM_BT_601_625: return MP_CSP_PRIM_BT_601_625; + case PL_COLOR_PRIM_BT_709: return MP_CSP_PRIM_BT_709; + case PL_COLOR_PRIM_BT_2020: return MP_CSP_PRIM_BT_2020; + case PL_COLOR_PRIM_BT_470M: return MP_CSP_PRIM_BT_470M; + case PL_COLOR_PRIM_APPLE: return MP_CSP_PRIM_APPLE; + case PL_COLOR_PRIM_ADOBE: return MP_CSP_PRIM_ADOBE; + case PL_COLOR_PRIM_PRO_PHOTO: return MP_CSP_PRIM_PRO_PHOTO; + case PL_COLOR_PRIM_CIE_1931: return MP_CSP_PRIM_CIE_1931; + case PL_COLOR_PRIM_DCI_P3: return 
MP_CSP_PRIM_DCI_P3; + case PL_COLOR_PRIM_DISPLAY_P3: return MP_CSP_PRIM_DISPLAY_P3; + case PL_COLOR_PRIM_V_GAMUT: return MP_CSP_PRIM_V_GAMUT; + case PL_COLOR_PRIM_S_GAMUT: return MP_CSP_PRIM_S_GAMUT; + case PL_COLOR_PRIM_EBU_3213: return MP_CSP_PRIM_EBU_3213; + case PL_COLOR_PRIM_FILM_C: return MP_CSP_PRIM_FILM_C; + case PL_COLOR_PRIM_ACES_AP0: return MP_CSP_PRIM_ACES_AP0; + case PL_COLOR_PRIM_ACES_AP1: return MP_CSP_PRIM_ACES_AP1; + case PL_COLOR_PRIM_COUNT: return MP_CSP_PRIM_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum pl_color_transfer mp_trc_to_pl(enum mp_csp_trc trc) +{ + switch (trc) { + case MP_CSP_TRC_AUTO: return PL_COLOR_TRC_UNKNOWN; + case MP_CSP_TRC_BT_1886: return PL_COLOR_TRC_BT_1886; + case MP_CSP_TRC_SRGB: return PL_COLOR_TRC_SRGB; + case MP_CSP_TRC_LINEAR: return PL_COLOR_TRC_LINEAR; + case MP_CSP_TRC_GAMMA18: return PL_COLOR_TRC_GAMMA18; + case MP_CSP_TRC_GAMMA20: return PL_COLOR_TRC_GAMMA20; + case MP_CSP_TRC_GAMMA22: return PL_COLOR_TRC_GAMMA22; + case MP_CSP_TRC_GAMMA24: return PL_COLOR_TRC_GAMMA24; + case MP_CSP_TRC_GAMMA26: return PL_COLOR_TRC_GAMMA26; + case MP_CSP_TRC_GAMMA28: return PL_COLOR_TRC_GAMMA28; + case MP_CSP_TRC_PRO_PHOTO: return PL_COLOR_TRC_PRO_PHOTO; + case MP_CSP_TRC_PQ: return PL_COLOR_TRC_PQ; + case MP_CSP_TRC_HLG: return PL_COLOR_TRC_HLG; + case MP_CSP_TRC_V_LOG: return PL_COLOR_TRC_V_LOG; + case MP_CSP_TRC_S_LOG1: return PL_COLOR_TRC_S_LOG1; + case MP_CSP_TRC_S_LOG2: return PL_COLOR_TRC_S_LOG2; + case MP_CSP_TRC_ST428: return PL_COLOR_TRC_ST428; + case MP_CSP_TRC_COUNT: return PL_COLOR_TRC_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum mp_csp_trc mp_trc_from_pl(enum pl_color_transfer trc) +{ + switch (trc){ + case PL_COLOR_TRC_UNKNOWN: return MP_CSP_TRC_AUTO; + case PL_COLOR_TRC_BT_1886: return MP_CSP_TRC_BT_1886; + case PL_COLOR_TRC_SRGB: return MP_CSP_TRC_SRGB; + case PL_COLOR_TRC_LINEAR: return MP_CSP_TRC_LINEAR; + case PL_COLOR_TRC_GAMMA18: return MP_CSP_TRC_GAMMA18; + case PL_COLOR_TRC_GAMMA20: return 
MP_CSP_TRC_GAMMA20; + case PL_COLOR_TRC_GAMMA22: return MP_CSP_TRC_GAMMA22; + case PL_COLOR_TRC_GAMMA24: return MP_CSP_TRC_GAMMA24; + case PL_COLOR_TRC_GAMMA26: return MP_CSP_TRC_GAMMA26; + case PL_COLOR_TRC_GAMMA28: return MP_CSP_TRC_GAMMA28; + case PL_COLOR_TRC_PRO_PHOTO: return MP_CSP_TRC_PRO_PHOTO; + case PL_COLOR_TRC_PQ: return MP_CSP_TRC_PQ; + case PL_COLOR_TRC_HLG: return MP_CSP_TRC_HLG; + case PL_COLOR_TRC_V_LOG: return MP_CSP_TRC_V_LOG; + case PL_COLOR_TRC_S_LOG1: return MP_CSP_TRC_S_LOG1; + case PL_COLOR_TRC_S_LOG2: return MP_CSP_TRC_S_LOG2; + case PL_COLOR_TRC_ST428: return MP_CSP_TRC_ST428; + case PL_COLOR_TRC_COUNT: return MP_CSP_TRC_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum pl_color_system mp_csp_to_pl(enum mp_csp csp) +{ + switch (csp) { + case MP_CSP_AUTO: return PL_COLOR_SYSTEM_UNKNOWN; + case MP_CSP_BT_601: return PL_COLOR_SYSTEM_BT_601; + case MP_CSP_BT_709: return PL_COLOR_SYSTEM_BT_709; + case MP_CSP_SMPTE_240M: return PL_COLOR_SYSTEM_SMPTE_240M; + case MP_CSP_BT_2020_NC: return PL_COLOR_SYSTEM_BT_2020_NC; + case MP_CSP_BT_2020_C: return PL_COLOR_SYSTEM_BT_2020_C; + case MP_CSP_RGB: return PL_COLOR_SYSTEM_RGB; + case MP_CSP_XYZ: return PL_COLOR_SYSTEM_XYZ; + case MP_CSP_YCGCO: return PL_COLOR_SYSTEM_YCGCO; + case MP_CSP_COUNT: return PL_COLOR_SYSTEM_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum pl_color_levels mp_levels_to_pl(enum mp_csp_levels levels) +{ + switch (levels) { + case MP_CSP_LEVELS_AUTO: return PL_COLOR_LEVELS_UNKNOWN; + case MP_CSP_LEVELS_TV: return PL_COLOR_LEVELS_TV; + case MP_CSP_LEVELS_PC: return PL_COLOR_LEVELS_PC; + case MP_CSP_LEVELS_COUNT: return PL_COLOR_LEVELS_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum mp_csp_levels mp_levels_from_pl(enum pl_color_levels levels) +{ + switch (levels){ + case PL_COLOR_LEVELS_UNKNOWN: return MP_CSP_LEVELS_AUTO; + case PL_COLOR_LEVELS_TV: return MP_CSP_LEVELS_TV; + case PL_COLOR_LEVELS_PC: return MP_CSP_LEVELS_PC; + case PL_COLOR_LEVELS_COUNT: return 
MP_CSP_LEVELS_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum pl_alpha_mode mp_alpha_to_pl(enum mp_alpha_type alpha) +{ + switch (alpha) { + case MP_ALPHA_AUTO: return PL_ALPHA_UNKNOWN; + case MP_ALPHA_STRAIGHT: return PL_ALPHA_INDEPENDENT; + case MP_ALPHA_PREMUL: return PL_ALPHA_PREMULTIPLIED; + } + + MP_ASSERT_UNREACHABLE(); +} + +enum pl_chroma_location mp_chroma_to_pl(enum mp_chroma_location chroma) +{ + switch (chroma) { + case MP_CHROMA_AUTO: return PL_CHROMA_UNKNOWN; + case MP_CHROMA_TOPLEFT: return PL_CHROMA_TOP_LEFT; + case MP_CHROMA_LEFT: return PL_CHROMA_LEFT; + case MP_CHROMA_CENTER: return PL_CHROMA_CENTER; + case MP_CHROMA_COUNT: return PL_CHROMA_COUNT; + } + + MP_ASSERT_UNREACHABLE(); +} + +void mp_map_dovi_metadata_to_pl(struct mp_image *mpi, + struct pl_frame *frame) +{ +#ifdef PL_HAVE_LAV_DOLBY_VISION + if (mpi->dovi) { + const AVDOVIMetadata *metadata = (AVDOVIMetadata *) mpi->dovi->data; + const AVDOVIRpuDataHeader *header = av_dovi_get_header(metadata); + + if (header->disable_residual_flag) { + // Only automatically map DoVi RPUs that don't require an EL + struct pl_dovi_metadata *dovi = talloc_ptrtype(mpi, dovi); + pl_frame_map_avdovi_metadata(frame, dovi, metadata); + } + } + +#if defined(PL_HAVE_LIBDOVI) + if (mpi->dovi_buf) + pl_hdr_metadata_from_dovi_rpu(&frame->color.hdr, mpi->dovi_buf->data, + mpi->dovi_buf->size); +#endif + +#endif // PL_HAVE_LAV_DOLBY_VISION +} diff --git a/video/out/placebo/utils.h b/video/out/placebo/utils.h new file mode 100644 index 0000000..bf780a8 --- /dev/null +++ b/video/out/placebo/utils.h @@ -0,0 +1,41 @@ +#pragma once + +#include "config.h" +#include "common/common.h" +#include "common/msg.h" +#include "video/csputils.h" +#include "video/mp_image.h" + +#include <libavutil/buffer.h> + +#include <libplacebo/common.h> +#include <libplacebo/log.h> +#include <libplacebo/colorspace.h> +#include <libplacebo/renderer.h> +#include <libplacebo/utils/libav.h> + +pl_log mppl_log_create(void *tactx, struct mp_log 
*log);
void mppl_log_set_probing(pl_log log, bool probing);

// Convert an mpv rectangle to the equivalent libplacebo rectangle.
static inline struct pl_rect2d mp_rect2d_to_pl(struct mp_rect rc)
{
    return (struct pl_rect2d) {
        .x0 = rc.x0,
        .y0 = rc.y0,
        .x1 = rc.x1,
        .y1 = rc.y1,
    };
}

// Bidirectional converters between mpv's colorspace enums and libplacebo's.
enum pl_color_primaries mp_prim_to_pl(enum mp_csp_prim prim);
enum mp_csp_prim mp_prim_from_pl(enum pl_color_primaries prim);
enum pl_color_transfer mp_trc_to_pl(enum mp_csp_trc trc);
enum mp_csp_trc mp_trc_from_pl(enum pl_color_transfer trc);
enum pl_color_system mp_csp_to_pl(enum mp_csp csp);
enum pl_color_levels mp_levels_to_pl(enum mp_csp_levels levels);
enum mp_csp_levels mp_levels_from_pl(enum pl_color_levels levels);
enum pl_alpha_mode mp_alpha_to_pl(enum mp_alpha_type alpha);
enum pl_chroma_location mp_chroma_to_pl(enum mp_chroma_location chroma);

// Attach Dolby Vision metadata from mpi to frame (see utils.c for details).
void mp_map_dovi_metadata_to_pl(struct mp_image *mpi,
                                struct pl_frame *frame);