From 7a46c07230b8d8108c0e8e80df4522d0ac116538 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:28:17 +0200
Subject: Adding upstream version 0.3.65.

Signed-off-by: Daniel Baumann
---
 spa/plugins/vulkan/meson.build                    |   12 +
 spa/plugins/vulkan/plugin.c                       |   50 +
 spa/plugins/vulkan/shaders/disk-intersection.comp |  143 +++
 spa/plugins/vulkan/shaders/filter-color.comp      |   39 +
 spa/plugins/vulkan/shaders/filter.comp            |   44 +
 spa/plugins/vulkan/shaders/filter.spv             |  Bin 0 -> 4884 bytes
 spa/plugins/vulkan/shaders/main.comp              |   50 +
 spa/plugins/vulkan/shaders/main.spv               |  Bin 0 -> 9980 bytes
 spa/plugins/vulkan/vulkan-compute-filter.c        |  808 ++++++++++++++++
 spa/plugins/vulkan/vulkan-compute-source.c        | 1016 +++++++++++++++++++++
 spa/plugins/vulkan/vulkan-utils.c                 |  758 +++++++++++++++
 spa/plugins/vulkan/vulkan-utils.h                 |   86 ++
 12 files changed, 3006 insertions(+)
 create mode 100644 spa/plugins/vulkan/meson.build
 create mode 100644 spa/plugins/vulkan/plugin.c
 create mode 100644 spa/plugins/vulkan/shaders/disk-intersection.comp
 create mode 100644 spa/plugins/vulkan/shaders/filter-color.comp
 create mode 100644 spa/plugins/vulkan/shaders/filter.comp
 create mode 100644 spa/plugins/vulkan/shaders/filter.spv
 create mode 100644 spa/plugins/vulkan/shaders/main.comp
 create mode 100644 spa/plugins/vulkan/shaders/main.spv
 create mode 100644 spa/plugins/vulkan/vulkan-compute-filter.c
 create mode 100644 spa/plugins/vulkan/vulkan-compute-source.c
 create mode 100644 spa/plugins/vulkan/vulkan-utils.c
 create mode 100644 spa/plugins/vulkan/vulkan-utils.h
(limited to 'spa/plugins/vulkan')

diff --git a/spa/plugins/vulkan/meson.build b/spa/plugins/vulkan/meson.build
new file mode 100644
index 0000000..0657d21
--- /dev/null
+++ b/spa/plugins/vulkan/meson.build
@@ -0,0 +1,12 @@
+spa_vulkan_sources = [
+  'plugin.c',
+  'vulkan-compute-filter.c',
+  'vulkan-compute-source.c',
+  'vulkan-utils.c'
+]
+
+spa_vulkan = shared_library('spa-vulkan',
+  spa_vulkan_sources,
+  dependencies : [ spa_dep, vulkan_dep, mathlib ],
+  install : true,
+  install_dir : spa_plugindir / 'vulkan')
diff --git a/spa/plugins/vulkan/plugin.c b/spa/plugins/vulkan/plugin.c
new file mode 100644
index 0000000..e9f40ba
--- /dev/null
+++ b/spa/plugins/vulkan/plugin.c
@@ -0,0 +1,50 @@
+/* Spa vulkan plugin
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+
+#include
+
+extern const struct spa_handle_factory spa_vulkan_compute_filter_factory;
+extern const struct spa_handle_factory spa_vulkan_compute_source_factory;
+
+SPA_EXPORT
+int spa_handle_factory_enum(const struct spa_handle_factory **factory, uint32_t *index)
+{
+    spa_return_val_if_fail(factory != NULL, -EINVAL);
+    spa_return_val_if_fail(index != NULL, -EINVAL);
+
+    switch (*index) {
+    case 0:
+        *factory = &spa_vulkan_compute_source_factory;
+        break;
+    case 1:
+        *factory = &spa_vulkan_compute_filter_factory;
+        break;
+    default:
+        return 0;
+    }
+    (*index)++;
+    return 1;
+}
diff --git a/spa/plugins/vulkan/shaders/disk-intersection.comp b/spa/plugins/vulkan/shaders/disk-intersection.comp
new file mode 100644
index 0000000..7b92fdf
--- /dev/null
+++ b/spa/plugins/vulkan/shaders/disk-intersection.comp
@@ -0,0 +1,143 @@
+// The MIT License
+// Copyright © 2013 Inigo Quilez
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+// Other intersectors:
+//
+// Box: https://www.shadertoy.com/view/ld23DV
+// Triangle: https://www.shadertoy.com/view/MlGcDz
+// Capsule: https://www.shadertoy.com/view/Xt3SzX
+// Ellipsoid: https://www.shadertoy.com/view/MlsSzn
+// Sphere: https://www.shadertoy.com/view/4d2XWV
+// Capped Cylinder: https://www.shadertoy.com/view/4lcSRn
+// Disk: https://www.shadertoy.com/view/lsfGDB
+// Capped Cone: https://www.shadertoy.com/view/llcfRf
+// Rounded Box: https://www.shadertoy.com/view/WlSXRW
+// Rounded Cone: https://www.shadertoy.com/view/MlKfzm
+// Torus: https://www.shadertoy.com/view/4sBGDy
+// Sphere4: https://www.shadertoy.com/view/3tj3DW
+// Goursat: https://www.shadertoy.com/view/3lj3DW
+
+
+#define SC 3.0
+
+#if 1
+//
+// Elegant way to intersect a planar coordinate system (3x3 linear system)
+//
+vec3 intersectCoordSys(in vec3 o, in vec3 d, vec3 c, vec3 u, vec3 v)
+{
+    vec3 q = o - c;
+    return vec3(dot(cross(u, v), q),
+                dot(cross(q, u), d),
+                dot(cross(v, q), d)) / dot(cross(v, u), d);
+}
+
+#else
+//
+// Ugly (but faster) way to intersect a planar coordinate system: plane + projection
+//
+vec3 intersectCoordSys(in vec3 o, in vec3 d, vec3 c, vec3 u, vec3 v)
+{
+    vec3 q = o - c;
+    vec3 n = cross(u, v);
+    float t = -dot(n, q) / dot(d, n);
+    float r = dot(u, q + d * t);
+    float s = dot(v, q + d * t);
+    return vec3(t, s, r);
+}
+
+#endif
+
+vec3 hash3(float n)
+{
+    return fract(sin(vec3(n, n + 1.0, n + 2.0)) *
+                 vec3(43758.5453123, 12578.1459123, 19642.3490423));
+}
+
+vec3 shade(in vec4 res)
+{
+    float ra = length(res.yz);
+    float an = atan(res.y, res.z) + 8.0 * iTime;
+    float pa = sin(3.0 * an);
+
+    vec3 cola = 0.5 + 0.5 * sin((res.w / 64.0) * 3.5 + vec3(0.0, 1.0, 2.0));
+
+    vec3 col = vec3(0.0);
+    col += cola * 0.4 * (1.0 - smoothstep(0.90, 1.00, ra));
+    col += cola * 1.0 * (1.0 - smoothstep(0.00, 0.03, abs(ra - 0.8))) * (0.5 + 0.5 * pa);
+    col += cola * 1.0 * (1.0 - smoothstep(0.00, 0.20, abs(ra - 0.8))) * (0.5 + 0.5 * pa);
+    col += cola * 0.5 * (1.0 - smoothstep(0.05, 0.10, abs(ra - 0.5))) * (0.5 + 0.5 * pa);
+    col += cola * 0.7 * (1.0 - smoothstep(0.00, 0.30, abs(ra - 0.5))) * (0.5 + 0.5 * pa);
+
+    return col * 0.3;
+}
+
+vec3 render(in vec3 ro, in vec3 rd)
+{
+    // raytrace
+    vec3 col = vec3(0.0);
+    for (int i = 0; i < 64; i++) {
+        // position disk
+        vec3 r = 2.5 * (-1.0 + 2.0 * hash3(float(i)));
+        r *= SC;
+        // orientate disk
+        vec3 u = normalize(r.zxy);
+        vec3 v = normalize(cross(u, vec3(0.0, 1.0, 0.0)));
+
+        // intersect coord sys
+        vec3 tmp = intersectCoordSys(ro, rd, r, u, v);
+        tmp /= SC;
+        if (dot(tmp.yz, tmp.yz) < 1.0 && tmp.x > 0.0) {
+            // shade
+            col += shade(vec4(tmp, float(i)));
+        }
+    }
+
+    return col;
+}
+
+void mainImage(out vec4 fragColor, in vec2 fragCoord)
+{
+    vec2 q = fragCoord.xy / iResolution.xy;
+    vec2 p = -1.0 + 2.0 * q;
+    p.x *= iResolution.x / iResolution.y;
+
+    // camera
+    vec3 ro = 2.0 * vec3(cos(0.5 * iTime * 1.1), 0.0, sin(0.5 * iTime * 1.1));
+    vec3 ta = vec3(0.0, 0.0, 0.0);
+    // camera matrix
+    vec3 ww = normalize(ta - ro);
+    vec3 uu = normalize(cross(ww, vec3(0.0, 1.0, 0.0)));
+    vec3 vv = normalize(cross(uu, ww));
+    // create view ray
+    vec3 rd = normalize(p.x * uu + p.y * vv + 1.0 * ww);
+
+    vec3 col = render(ro * SC, rd);
+
+    fragColor = vec4(col, 1.0);
+}
+
+void mainVR(out vec4 fragColor, in vec2 fragCoord, in vec3 fragRayOri, in vec3 fragRayDir)
+{
+    vec3 col = render(fragRayOri + vec3(0.0, 0.0, 0.0), fragRayDir);
+
+    fragColor = vec4(col, 1.0);
+}
diff --git a/spa/plugins/vulkan/shaders/filter-color.comp b/spa/plugins/vulkan/shaders/filter-color.comp
new file mode 100644
index 0000000..e08b715
--- /dev/null
+++ b/spa/plugins/vulkan/shaders/filter-color.comp
@@ -0,0 +1,39 @@
+void mainImage( out vec4 fragColor, in vec2 fragCoord )
+{
+    vec2 p = fragCoord.xy/iResolution.xy;
+
+    vec4 col = texture(iChannel0, p);
+
+
+    //Desaturate
+    if(p.x<.25)
+    {
+        col = vec4( (col.r+col.g+col.b)/3. );
+    }
+    //Invert
+    else if (p.x<.5)
+    {
+        col = vec4(1.) - texture(iChannel0, p);
+    }
+    //Chromatic aberration
+    else if (p.x<.75)
+    {
+        vec2 offset = vec2(.01,.0);
+        col.r = texture(iChannel0, p+offset.xy).r;
+        col.g = texture(iChannel0, p ).g;
+        col.b = texture(iChannel0, p+offset.yx).b;
+    }
+    //Color switching
+    else
+    {
+        col.rgb = texture(iChannel0, p).brg;
+    }
+
+
+    //Line
+    if( mod(abs(p.x+.5/iResolution.y),.25)<0.5/iResolution.y )
+        col = vec4(1.);
+
+
+    fragColor = col;
+}
diff --git a/spa/plugins/vulkan/shaders/filter.comp b/spa/plugins/vulkan/shaders/filter.comp
new file mode 100644
index 0000000..f1ca486
--- /dev/null
+++ b/spa/plugins/vulkan/shaders/filter.comp
@@ -0,0 +1,44 @@
+#version 450
+#extension GL_ARB_separate_shader_objects : enable
+
+#define WORKGROUP_SIZE 32
+layout (local_size_x = WORKGROUP_SIZE, local_size_y = WORKGROUP_SIZE, local_size_z = 1 ) in;
+
+layout(rgba32f, set = 0, binding = 0) uniform image2D resultImage;
+layout(set = 0, binding = 1) uniform sampler2D iChannel0;
+
+layout( push_constant ) uniform Constants {
+    float time;
+    int frame;
+    int width;
+    int height;
+} PushConstant;
+
+float iTime;
+int iFrame;
+vec3 iResolution;
+vec4 iMouse;
+
+void mainImage( out vec4 fragColor, in vec2 fragCoord );
+
+void main()
+{
+    iTime = PushConstant.time;
+    iFrame = PushConstant.frame;
+    iResolution = vec3(float(PushConstant.width), float(PushConstant.height), 0.0);
+    iMouse = vec4(0.0, 0.0, 0.0, 0.0);
+    vec2 coord = vec2(float(gl_GlobalInvocationID.x),
+                      iResolution.y - float(gl_GlobalInvocationID.y));
+    vec4 outColor;
+
+    if(coord.x >= iResolution.x || coord.y >= iResolution.y)
+        return;
+
+    mainImage(outColor, coord);
+
+    imageStore(resultImage, ivec2(gl_GlobalInvocationID.xy), outColor);
+}
+
+//#include "smearcam.comp"
+#include "filter-color.comp"
+//#include "filter-ripple.comp"
diff --git a/spa/plugins/vulkan/shaders/filter.spv b/spa/plugins/vulkan/shaders/filter.spv
new file mode 100644
index 0000000..5bfd245
Binary files /dev/null and b/spa/plugins/vulkan/shaders/filter.spv differ
diff --git a/spa/plugins/vulkan/shaders/main.comp b/spa/plugins/vulkan/shaders/main.comp
new file mode 100644
index 0000000..368b7cf
--- /dev/null
+++ b/spa/plugins/vulkan/shaders/main.comp
@@ -0,0 +1,50 @@
+#version 450
+#extension GL_ARB_separate_shader_objects : enable
+
+#define WORKGROUP_SIZE 32
+layout (local_size_x = WORKGROUP_SIZE, local_size_y = WORKGROUP_SIZE, local_size_z = 1 ) in;
+
+layout(rgba32f, binding = 0) uniform image2D resultImage;
+
+layout( push_constant ) uniform Constants {
+    float time;
+    int frame;
+    int width;
+    int height;
+} PushConstant;
+
+float iTime;
+int iFrame;
+vec3 iResolution;
+vec4 iMouse;
+
+void mainImage( out vec4 fragColor, in vec2 fragCoord );
+
+void main()
+{
+    iTime = PushConstant.time;
+    iFrame = PushConstant.frame;
+    iResolution = vec3(float(PushConstant.width), float(PushConstant.height), 0.0);
+    iMouse = vec4(0.0, 0.0, 0.0, 0.0);
+    vec2 coord = vec2(float(gl_GlobalInvocationID.x),
+                      iResolution.y - float(gl_GlobalInvocationID.y));
+    vec4 outColor;
+
+    if(coord.x >= iResolution.x || coord.y >= iResolution.y)
+        return;
+
+    mainImage(outColor, coord);
+
+    imageStore(resultImage, ivec2(gl_GlobalInvocationID.xy), outColor);
+}
+
+//#include "plasma-globe.comp"
+//#include "mandelbrot-distance.comp"
+#include "disk-intersection.comp"
+//#include "ring-twister.comp"
+//#include "gears.comp"
+//#include "protean-clouds.comp"
+//#include "flame.comp"
+//#include "shader.comp"
+//#include "raymarching-primitives.comp"
+//#include "3d-primitives.comp"
diff --git a/spa/plugins/vulkan/shaders/main.spv b/spa/plugins/vulkan/shaders/main.spv
new file mode 100644
index 0000000..5da1755
Binary files /dev/null and b/spa/plugins/vulkan/shaders/main.spv differ
diff --git a/spa/plugins/vulkan/vulkan-compute-filter.c b/spa/plugins/vulkan/vulkan-compute-filter.c
new file mode 100644
index 0000000..94efec3
--- /dev/null
+++ b/spa/plugins/vulkan/vulkan-compute-filter.c
@@ -0,0 +1,808 @@
+/* Spa
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vulkan-utils.h" + +#define NAME "vulkan-compute-filter" + +struct buffer { + uint32_t id; +#define BUFFER_FLAG_OUT (1<<0) + uint32_t flags; + struct spa_buffer *outbuf; + struct spa_meta_header *h; + struct spa_list link; +}; + +struct port { + uint64_t info_all; + struct spa_port_info info; + + enum spa_direction direction; + struct spa_param_info params[5]; + + struct spa_io_buffers *io; + + bool have_format; + struct spa_video_info current_format; + + struct buffer buffers[MAX_BUFFERS]; + uint32_t n_buffers; + + struct spa_list empty; + struct spa_list ready; + uint32_t stream_id; +}; + +struct impl { + struct spa_handle handle; + struct spa_node node; + + struct spa_log *log; + + struct spa_io_position *position; + + uint64_t info_all; + struct spa_node_info info; + struct spa_param_info params[2]; + + struct spa_hook_list hooks; + struct spa_callbacks callbacks; + + bool started; + + struct vulkan_state state; + struct port port[2]; +}; + +#define CHECK_PORT(this,d,p) ((p) < 1) + +static int impl_node_enum_params(void *object, int seq, + uint32_t id, uint32_t start, uint32_t num, + const struct spa_pod *filter) +{ + struct impl *this = object; + struct spa_pod *param; + struct spa_pod_builder b = { 0 }; + uint8_t buffer[1024]; + struct spa_result_node_params result; + uint32_t count = 0; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(num != 0, -EINVAL); + + result.id = id; + result.next = start; + next: + result.index = result.next++; + + spa_pod_builder_init(&b, buffer, sizeof(buffer)); + + switch (id) { + default: + return -ENOENT; + } + + if (spa_pod_filter(&b, &result.param, param, filter) < 0) + goto next; + + spa_node_emit_result(&this->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result); + + if (++count != num) + goto next; + + return 0; +} + +static int impl_node_set_io(void *object, uint32_t id, void *data, size_t size) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + switch (id) { + case SPA_IO_Position: + if (size > 0 && size < sizeof(struct spa_io_position)) + return -EINVAL; + this->position = data; + break; + default: + return -ENOENT; + } + return 0; +} +static int impl_node_set_param(void *object, uint32_t id, uint32_t flags, + const struct spa_pod *param) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + switch (id) { + default: + return -ENOENT; + } + return 0; +} + +static inline void reuse_buffer(struct impl *this, struct port *port, uint32_t id) +{ + struct buffer *b = &port->buffers[id]; + + if (SPA_FLAG_IS_SET(b->flags, BUFFER_FLAG_OUT)) { + spa_log_debug(this->log, NAME " %p: reuse buffer %d", this, id); + + SPA_FLAG_CLEAR(b->flags, BUFFER_FLAG_OUT); + spa_list_append(&port->empty, &b->link); + } +} + +static int impl_node_send_command(void *object, const struct spa_command *command) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(command != NULL, -EINVAL); + + switch (SPA_NODE_COMMAND_ID(command)) { + case SPA_NODE_COMMAND_Start: + if (this->started) + return 0; + + this->started = true; + spa_vulkan_start(&this->state); + break; + + case SPA_NODE_COMMAND_Suspend: + case SPA_NODE_COMMAND_Pause: + if (!this->started) + return 0; + + this->started = false; + spa_vulkan_stop(&this->state); 
+ break; + default: + return -ENOTSUP; + } + return 0; +} + +static const struct spa_dict_item node_info_items[] = { + { SPA_KEY_MEDIA_CLASS, "Video/Filter" }, +}; + +static void emit_node_info(struct impl *this, bool full) +{ + uint64_t old = full ? this->info.change_mask : 0; + if (full) + this->info.change_mask = this->info_all; + if (this->info.change_mask) { + this->info.props = &SPA_DICT_INIT_ARRAY(node_info_items); + spa_node_emit_info(&this->hooks, &this->info); + this->info.change_mask = old; + } +} + +static void emit_port_info(struct impl *this, struct port *port, bool full) +{ + uint64_t old = full ? port->info.change_mask : 0; + if (full) + port->info.change_mask = port->info_all; + if (port->info.change_mask) { + struct spa_dict_item items[1]; + + items[0] = SPA_DICT_ITEM_INIT(SPA_KEY_FORMAT_DSP, "32 bit float RGBA video"); + port->info.props = &SPA_DICT_INIT(items, 1); + spa_node_emit_port_info(&this->hooks, + port->direction, 0, &port->info); + port->info.change_mask = old; + } +} + +static int +impl_node_add_listener(void *object, + struct spa_hook *listener, + const struct spa_node_events *events, + void *data) +{ + struct impl *this = object; + struct spa_hook_list save; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + spa_hook_list_isolate(&this->hooks, &save, listener, events, data); + + emit_node_info(this, true); + emit_port_info(this, &this->port[0], true); + emit_port_info(this, &this->port[1], true); + + spa_hook_list_join(&this->hooks, &save); + + return 0; +} + +static int +impl_node_set_callbacks(void *object, + const struct spa_node_callbacks *callbacks, + void *data) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + this->callbacks = SPA_CALLBACKS_INIT(callbacks, data); + + return 0; +} + +static int impl_node_add_port(void *object, enum spa_direction direction, uint32_t port_id, + const struct spa_dict *props) +{ + return -ENOTSUP; +} + +static int +impl_node_remove_port(void *object, enum spa_direction direction, uint32_t port_id) +{ + return -ENOTSUP; +} + +static int port_enum_formats(void *object, + enum spa_direction direction, uint32_t port_id, + uint32_t index, + const struct spa_pod *filter, + struct spa_pod **param, + struct spa_pod_builder *builder) +{ + switch (index) { + case 0: + *param = spa_pod_builder_add_object(builder, + SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat, + SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), + SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_dsp), + SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_DSP_F32)); + break; + default: + return 0; + } + return 1; +} + +static int +impl_node_port_enum_params(void *object, int seq, + enum spa_direction direction, uint32_t port_id, + uint32_t id, uint32_t start, uint32_t num, + const struct spa_pod *filter) +{ + struct impl *this = object; + struct port *port; + struct spa_pod_builder b = { 0 }; + uint8_t buffer[1024]; + struct spa_pod *param; + struct spa_result_node_params result; + uint32_t count = 0; + int res; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(num != 0, -EINVAL); + + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port[direction]; + + result.id = id; + result.next = start; + next: + result.index = result.next++; + + spa_pod_builder_init(&b, buffer, sizeof(buffer)); + + switch (id) { + case SPA_PARAM_EnumFormat: + if ((res = port_enum_formats(this, direction, port_id, + result.index, filter, ¶m, &b)) <= 0) + return res; + break; + + 
case SPA_PARAM_Format: + if (!port->have_format) + return -EIO; + if (result.index > 0) + return 0; + + param = spa_format_video_dsp_build(&b, id, &port->current_format.info.dsp); + break; + + case SPA_PARAM_Buffers: + { + if (!port->have_format) + return -EIO; + if (this->position == NULL) + return -EIO; + if (result.index > 0) + return 0; + + spa_log_debug(this->log, NAME" %p: %dx%d stride %d", this, + this->position->video.size.width, + this->position->video.size.height, + this->position->video.stride); + + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_ParamBuffers, id, + SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(2, 1, MAX_BUFFERS), + SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1), + SPA_PARAM_BUFFERS_size, SPA_POD_Int(this->position->video.stride * + this->position->video.size.height), + SPA_PARAM_BUFFERS_stride, SPA_POD_Int(this->position->video.stride)); + break; + } + case SPA_PARAM_Meta: + switch (result.index) { + case 0: + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_ParamMeta, id, + SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header), + SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header))); + break; + + default: + return 0; + } + break; + default: + return -ENOENT; + } + + if (spa_pod_filter(&b, &result.param, param, filter) < 0) + goto next; + + spa_node_emit_result(&this->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result); + + if (++count != num) + goto next; + + return 0; +} + +static int clear_buffers(struct impl *this, struct port *port) +{ + if (port->n_buffers > 0) { + spa_log_debug(this->log, NAME " %p: clear buffers", this); + spa_vulkan_stop(&this->state); + spa_vulkan_use_buffers(&this->state, &this->state.streams[port->stream_id], 0, 0, NULL); + port->n_buffers = 0; + spa_list_init(&port->empty); + spa_list_init(&port->ready); + this->started = false; + } + return 0; +} + +static int port_set_format(struct impl *this, struct port *port, + uint32_t flags, + const struct spa_pod *format) +{ + int res; + + if (format == NULL) { + port->have_format = false; + clear_buffers(this, port); + spa_vulkan_unprepare(&this->state); + } else { + struct spa_video_info info = { 0 }; + + if ((res = spa_format_parse(format, &info.media_type, &info.media_subtype)) < 0) + return res; + + if (info.media_type != SPA_MEDIA_TYPE_video && + info.media_subtype != SPA_MEDIA_SUBTYPE_dsp) + return -EINVAL; + + if (spa_format_video_dsp_parse(format, &info.info.dsp) < 0) + return -EINVAL; + + if (info.info.dsp.format != SPA_VIDEO_FORMAT_DSP_F32) + return -EINVAL; + + this->state.constants.width = this->position->video.size.width; + this->state.constants.height = this->position->video.size.height; + + port->current_format = info; + port->have_format = true; + } + + port->info.change_mask |= SPA_PORT_CHANGE_MASK_PARAMS; + if (port->have_format) { + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_READWRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ); + } else { + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0); + } + emit_port_info(this, port, false); + + return 0; +} + +static int +impl_node_port_set_param(void *object, + enum spa_direction direction, uint32_t port_id, + uint32_t id, uint32_t flags, + const struct spa_pod *param) +{ + struct impl *this = object; + struct port *port; + int res; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(node, direction, port_id), -EINVAL); + port = 
&this->port[direction]; + + switch (id) { + case SPA_PARAM_Format: + res = port_set_format(this, port, flags, param); + break; + default: + return -ENOENT; + } + return res; +} + +static int +impl_node_port_use_buffers(void *object, + enum spa_direction direction, + uint32_t port_id, + uint32_t flags, + struct spa_buffer **buffers, + uint32_t n_buffers) +{ + struct impl *this = object; + struct port *port; + uint32_t i; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port[direction]; + + clear_buffers(this, port); + + if (n_buffers > 0 && !port->have_format) + return -EIO; + if (n_buffers > MAX_BUFFERS) + return -ENOSPC; + + for (i = 0; i < n_buffers; i++) { + struct buffer *b; + + b = &port->buffers[i]; + b->id = i; + b->outbuf = buffers[i]; + b->flags = 0; + b->h = spa_buffer_find_meta_data(buffers[i], SPA_META_Header, sizeof(*b->h)); + + spa_log_info(this->log, "%p: %d:%d add buffer %p", port, direction, port_id, b); + spa_list_append(&port->empty, &b->link); + } + spa_vulkan_use_buffers(&this->state, &this->state.streams[port->stream_id], flags, n_buffers, buffers); + port->n_buffers = n_buffers; + + return 0; +} + +static int +impl_node_port_set_io(void *object, + enum spa_direction direction, + uint32_t port_id, + uint32_t id, + void *data, size_t size) +{ + struct impl *this = object; + struct port *port; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port[direction]; + + switch (id) { + case SPA_IO_Buffers: + port->io = data; + break; + default: + return -ENOENT; + } + return 0; +} + +static int impl_node_port_reuse_buffer(void *object, uint32_t port_id, uint32_t buffer_id) +{ + struct impl *this = object; + struct port *port; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(port_id == 0, -EINVAL); + + port = &this->port[SPA_DIRECTION_OUTPUT]; + spa_return_val_if_fail(buffer_id < port->n_buffers, -EINVAL); + + reuse_buffer(this, port, buffer_id); + + return 0; +} + +static int impl_node_process(void *object) +{ + struct impl *this = object; + struct port *inport, *outport; + struct spa_io_buffers *inio, *outio; + struct buffer *b; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + inport = &this->port[SPA_DIRECTION_INPUT]; + if ((inio = inport->io) == NULL) + return -EIO; + + if (inio->status != SPA_STATUS_HAVE_DATA) + return inio->status; + + if (inio->buffer_id >= inport->n_buffers) { + inio->status = -EINVAL; + return -EINVAL; + } + + outport = &this->port[SPA_DIRECTION_OUTPUT]; + if ((outio = outport->io) == NULL) + return -EIO; + + if (outio->status == SPA_STATUS_HAVE_DATA) + return SPA_STATUS_HAVE_DATA; + + if (outio->buffer_id < outport->n_buffers) { + reuse_buffer(this, outport, outio->buffer_id); + outio->buffer_id = SPA_ID_INVALID; + } + + if (spa_list_is_empty(&outport->empty)) { + spa_log_debug(this->log, NAME " %p: out of buffers", this); + return -EPIPE; + } + b = &inport->buffers[inio->buffer_id]; + this->state.streams[inport->stream_id].pending_buffer_id = b->id; + inio->status = SPA_STATUS_NEED_DATA; + + b = spa_list_first(&outport->empty, struct buffer, link); + spa_list_remove(&b->link); + SPA_FLAG_SET(b->flags, BUFFER_FLAG_OUT); + this->state.streams[outport->stream_id].pending_buffer_id = b->id; + + this->state.constants.time += 0.025; + this->state.constants.frame++; + + spa_log_debug(this->log, "filter into %d", b->id); + + 
spa_vulkan_process(&this->state); + + b->outbuf->datas[0].chunk->offset = 0; + b->outbuf->datas[0].chunk->size = b->outbuf->datas[0].maxsize; + b->outbuf->datas[0].chunk->stride = this->position->video.stride; + + outio->buffer_id = b->id; + outio->status = SPA_STATUS_HAVE_DATA; + + return SPA_STATUS_NEED_DATA | SPA_STATUS_HAVE_DATA; +} + +static const struct spa_node_methods impl_node = { + SPA_VERSION_NODE_METHODS, + .add_listener = impl_node_add_listener, + .set_callbacks = impl_node_set_callbacks, + .enum_params = impl_node_enum_params, + .set_param = impl_node_set_param, + .set_io = impl_node_set_io, + .send_command = impl_node_send_command, + .add_port = impl_node_add_port, + .remove_port = impl_node_remove_port, + .port_enum_params = impl_node_port_enum_params, + .port_set_param = impl_node_port_set_param, + .port_use_buffers = impl_node_port_use_buffers, + .port_set_io = impl_node_port_set_io, + .port_reuse_buffer = impl_node_port_reuse_buffer, + .process = impl_node_process, +}; + +static int impl_get_interface(struct spa_handle *handle, const char *type, void **interface) +{ + struct impl *this; + + spa_return_val_if_fail(handle != NULL, -EINVAL); + spa_return_val_if_fail(interface != NULL, -EINVAL); + + this = (struct impl *) handle; + + if (spa_streq(type, SPA_TYPE_INTERFACE_Node)) + *interface = &this->node; + else + return -ENOENT; + + return 0; +} + +static int impl_clear(struct spa_handle *handle) +{ + return 0; +} + +static size_t +impl_get_size(const struct spa_handle_factory *factory, + const struct spa_dict *params) +{ + return sizeof(struct impl); +} + +static int +impl_init(const struct spa_handle_factory *factory, + struct spa_handle *handle, + const struct spa_dict *info, + const struct spa_support *support, + uint32_t n_support) +{ + struct impl *this; + struct port *port; + + spa_return_val_if_fail(factory != NULL, -EINVAL); + spa_return_val_if_fail(handle != NULL, -EINVAL); + + handle->get_interface = impl_get_interface; + handle->clear = impl_clear; + + this = (struct impl *) handle; + + this->log = spa_support_find(support, n_support, SPA_TYPE_INTERFACE_Log); + this->state.log = this->log; + this->state.shaderName = "spa/plugins/vulkan/shaders/filter.spv"; + + spa_hook_list_init(&this->hooks); + + this->node.iface = SPA_INTERFACE_INIT( + SPA_TYPE_INTERFACE_Node, + SPA_VERSION_NODE, + &impl_node, this); + + this->info_all = SPA_NODE_CHANGE_MASK_FLAGS | + SPA_NODE_CHANGE_MASK_PROPS | + SPA_NODE_CHANGE_MASK_PARAMS; + this->info = SPA_NODE_INFO_INIT(); + this->info.max_output_ports = 1; + this->info.max_input_ports = 1; + this->info.flags = SPA_NODE_FLAG_RT; + this->params[0] = SPA_PARAM_INFO(SPA_PARAM_PropInfo, SPA_PARAM_INFO_READ); + this->params[1] = SPA_PARAM_INFO(SPA_PARAM_Props, SPA_PARAM_INFO_READWRITE); + this->info.params = this->params; + this->info.n_params = 2; + + port = &this->port[0]; + port->stream_id = 1; + port->direction = SPA_DIRECTION_INPUT; + port->info_all = SPA_PORT_CHANGE_MASK_FLAGS | + SPA_PORT_CHANGE_MASK_PARAMS | + SPA_PORT_CHANGE_MASK_PROPS; + port->info = SPA_PORT_INFO_INIT(); + port->info.flags = SPA_PORT_FLAG_NO_REF | SPA_PORT_FLAG_CAN_ALLOC_BUFFERS; + port->params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ); + port->params[1] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ); + port->params[2] = SPA_PARAM_INFO(SPA_PARAM_IO, SPA_PARAM_INFO_READ); + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0); + port->info.params = 
port->params; + port->info.n_params = 5; + spa_vulkan_init_stream(&this->state, &this->state.streams[port->stream_id], + SPA_DIRECTION_INPUT, NULL); + spa_list_init(&port->empty); + spa_list_init(&port->ready); + + port = &this->port[1]; + port->stream_id = 0; + port->direction = SPA_DIRECTION_OUTPUT; + port->info_all = SPA_PORT_CHANGE_MASK_FLAGS | + SPA_PORT_CHANGE_MASK_PARAMS | + SPA_PORT_CHANGE_MASK_PROPS; + port->info = SPA_PORT_INFO_INIT(); + port->info.flags = SPA_PORT_FLAG_NO_REF | SPA_PORT_FLAG_CAN_ALLOC_BUFFERS; + port->params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ); + port->params[1] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ); + port->params[2] = SPA_PARAM_INFO(SPA_PARAM_IO, SPA_PARAM_INFO_READ); + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0); + port->info.params = port->params; + port->info.n_params = 5; + spa_list_init(&port->empty); + spa_list_init(&port->ready); + spa_vulkan_init_stream(&this->state, &this->state.streams[port->stream_id], + SPA_DIRECTION_OUTPUT, NULL); + + this->state.n_streams = 2; + spa_vulkan_prepare(&this->state); + + return 0; +} + +static const struct spa_interface_info impl_interfaces[] = { + {SPA_TYPE_INTERFACE_Node,}, +}; + +static int +impl_enum_interface_info(const struct spa_handle_factory *factory, + const struct spa_interface_info **info, + uint32_t *index) +{ + spa_return_val_if_fail(factory != NULL, -EINVAL); + spa_return_val_if_fail(info != NULL, -EINVAL); + spa_return_val_if_fail(index != NULL, -EINVAL); + + switch (*index) { + case 0: + *info = &impl_interfaces[*index]; + break; + default: + return 0; + } + (*index)++; + return 1; +} + +static const struct spa_dict_item info_items[] = { + { SPA_KEY_FACTORY_AUTHOR, "Wim Taymans " }, + { SPA_KEY_FACTORY_DESCRIPTION, "Filter video frames using a vulkan compute shader" }, +}; + +static const struct spa_dict info = SPA_DICT_INIT_ARRAY(info_items); + +const struct spa_handle_factory spa_vulkan_compute_filter_factory = { + SPA_VERSION_HANDLE_FACTORY, + SPA_NAME_API_VULKAN_COMPUTE_FILTER, + &info, + impl_get_size, + impl_init, + impl_enum_interface_info, +}; diff --git a/spa/plugins/vulkan/vulkan-compute-source.c b/spa/plugins/vulkan/vulkan-compute-source.c new file mode 100644 index 0000000..4602e5d --- /dev/null +++ b/spa/plugins/vulkan/vulkan-compute-source.c @@ -0,0 +1,1016 @@ +/* Spa + * + * Copyright © 2019 Wim Taymans + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vulkan-utils.h" + +#define NAME "vulkan-compute-source" + +#define FRAMES_TO_TIME(this,f) ((this->position->video.framerate.denom * (f) * SPA_NSEC_PER_SEC) / \ + (this->position->video.framerate.num)) + +#define DEFAULT_LIVE true + +struct props { + bool live; +}; + +static void reset_props(struct props *props) +{ + props->live = DEFAULT_LIVE; +} + +struct buffer { + uint32_t id; +#define BUFFER_FLAG_OUT (1<<0) + uint32_t flags; + struct spa_buffer *outbuf; + struct spa_meta_header *h; + struct spa_list link; +}; + +struct port { + uint64_t info_all; + struct spa_port_info info; + struct spa_param_info params[5]; + + struct spa_io_buffers *io; + + bool have_format; + struct spa_video_info current_format; + + struct buffer buffers[MAX_BUFFERS]; + uint32_t n_buffers; + + struct spa_list empty; + struct spa_list ready; +}; + +struct impl { + struct spa_handle handle; + struct spa_node node; + + struct spa_log *log; + struct spa_loop *data_loop; + struct spa_system *data_system; + + struct spa_io_clock *clock; + struct spa_io_position *position; + + uint64_t info_all; + struct spa_node_info info; + struct spa_param_info params[2]; + struct props props; + + struct spa_hook_list hooks; + struct spa_callbacks callbacks; + + bool async; + struct spa_source timer_source; + struct itimerspec timerspec; + + bool started; + uint64_t start_time; + uint64_t elapsed_time; + + uint64_t frame_count; + + struct vulkan_state state; + struct port port; +}; + +#define CHECK_PORT(this,d,p) ((d) == SPA_DIRECTION_OUTPUT && (p) < 1) + +static int impl_node_enum_params(void *object, int seq, + uint32_t id, uint32_t start, uint32_t num, + const struct spa_pod *filter) +{ + struct impl *this = object; + struct spa_pod *param; + struct spa_pod_builder b = { 0 }; + uint8_t buffer[1024]; + struct spa_result_node_params result; + uint32_t count = 0; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(num != 0, -EINVAL); + + result.id = id; + result.next = start; + next: + result.index = result.next++; + + spa_pod_builder_init(&b, buffer, sizeof(buffer)); + + switch (id) { + case SPA_PARAM_PropInfo: + { + struct props *p = &this->props; + + switch (result.index) { + case 0: + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_PropInfo, id, + SPA_PROP_INFO_id, SPA_POD_Id(SPA_PROP_live), + SPA_PROP_INFO_description, SPA_POD_String("Configure live mode of the source"), + SPA_PROP_INFO_type, SPA_POD_Bool(p->live)); + break; + default: + return 0; + } + break; + } + case SPA_PARAM_Props: + { + struct props *p = &this->props; + + switch (result.index) { + case 0: + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_Props, id, + SPA_PROP_live, SPA_POD_Bool(p->live)); + break; + default: + return 0; + } + break; + } + default: + return -ENOENT; + } + + if (spa_pod_filter(&b, &result.param, param, filter) < 0) + goto next; + + spa_node_emit_result(&this->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result); + + if (++count != num) + goto next; + + return 0; +} + +static int impl_node_set_io(void *object, 
uint32_t id, void *data, size_t size) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + switch (id) { + case SPA_IO_Clock: + if (size > 0 && size < sizeof(struct spa_io_clock)) + return -EINVAL; + this->clock = data; + break; + case SPA_IO_Position: + this->position = data; + break; + default: + return -ENOENT; + } + return 0; +} + +static int impl_node_set_param(void *object, uint32_t id, uint32_t flags, + const struct spa_pod *param) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + switch (id) { + case SPA_PARAM_Props: + { + struct props *p = &this->props; + struct port *port = &this->port; + + if (param == NULL) { + reset_props(p); + return 0; + } + spa_pod_parse_object(param, + SPA_TYPE_OBJECT_Props, NULL, + SPA_PROP_live, SPA_POD_OPT_Bool(&p->live)); + + if (p->live) + port->info.flags |= SPA_PORT_FLAG_LIVE; + else + port->info.flags &= ~SPA_PORT_FLAG_LIVE; + break; + } + default: + return -ENOENT; + } + return 0; +} + + +static void set_timer(struct impl *this, bool enabled) +{ + if (this->async || this->props.live) { + if (enabled) { + if (this->props.live) { + uint64_t next_time = this->start_time + this->elapsed_time; + this->timerspec.it_value.tv_sec = next_time / SPA_NSEC_PER_SEC; + this->timerspec.it_value.tv_nsec = next_time % SPA_NSEC_PER_SEC; + } else { + this->timerspec.it_value.tv_sec = 0; + this->timerspec.it_value.tv_nsec = 1; + } + } else { + this->timerspec.it_value.tv_sec = 0; + this->timerspec.it_value.tv_nsec = 0; + } + spa_system_timerfd_settime(this->data_system, + this->timer_source.fd, SPA_FD_TIMER_ABSTIME, &this->timerspec, NULL); + } +} + +static int read_timer(struct impl *this) +{ + uint64_t expirations; + int res = 0; + + if (this->async || this->props.live) { + if ((res = spa_system_timerfd_read(this->data_system, + this->timer_source.fd, &expirations)) < 0) { + if (res != -EAGAIN) + spa_log_error(this->log, NAME " %p: timerfd error: %s", + this, spa_strerror(res)); + } + } + return res; +} + +static int make_buffer(struct impl *this) +{ + struct buffer *b; + struct port *port = &this->port; + uint32_t n_bytes; + int res; + + if (read_timer(this) < 0) + return 0; + + if ((res = spa_vulkan_ready(&this->state)) < 0) { + res = SPA_STATUS_OK; + goto next; + } + + if (spa_list_is_empty(&port->empty)) { + set_timer(this, false); + spa_log_error(this->log, NAME " %p: out of buffers", this); + return -EPIPE; + } + b = spa_list_first(&port->empty, struct buffer, link); + spa_list_remove(&b->link); + + n_bytes = b->outbuf->datas[0].maxsize; + + spa_log_trace(this->log, NAME " %p: dequeue buffer %d", this, b->id); + + this->state.constants.time = this->elapsed_time / (float) SPA_NSEC_PER_SEC; + this->state.constants.frame = this->frame_count; + + this->state.streams[0].pending_buffer_id = b->id; + spa_vulkan_process(&this->state); + + if (this->state.streams[0].ready_buffer_id != SPA_ID_INVALID) { + struct buffer *b = &port->buffers[this->state.streams[0].ready_buffer_id]; + + this->state.streams[0].ready_buffer_id = SPA_ID_INVALID; + + spa_log_trace(this->log, NAME " %p: ready buffer %d", this, b->id); + + b->outbuf->datas[0].chunk->offset = 0; + b->outbuf->datas[0].chunk->size = n_bytes; + b->outbuf->datas[0].chunk->stride = this->position->video.stride; + + if (b->h) { + b->h->seq = this->frame_count; + b->h->pts = this->start_time + this->elapsed_time; + b->h->dts_offset = 0; + } + + spa_list_append(&port->ready, &b->link); + + res = SPA_STATUS_HAVE_DATA; + } +next: + 
this->frame_count++; + this->elapsed_time = FRAMES_TO_TIME(this, this->frame_count); + set_timer(this, true); + + return res; +} + +static inline void reuse_buffer(struct impl *this, struct port *port, uint32_t id) +{ + struct buffer *b = &port->buffers[id]; + + if (SPA_FLAG_IS_SET(b->flags, BUFFER_FLAG_OUT)) { + spa_log_trace(this->log, NAME " %p: reuse buffer %d", this, id); + + SPA_FLAG_CLEAR(b->flags, BUFFER_FLAG_OUT); + spa_list_append(&port->empty, &b->link); + + if (!this->props.live) + set_timer(this, true); + } +} + +static void on_output(struct spa_source *source) +{ + struct impl *this = source->data; + struct port *port = &this->port; + struct spa_io_buffers *io = port->io; + int res; + + if (io == NULL) + return; + + if (io->status == SPA_STATUS_HAVE_DATA) + return; + + if (io->buffer_id < port->n_buffers) { + reuse_buffer(this, port, io->buffer_id); + io->buffer_id = SPA_ID_INVALID; + } + + res = make_buffer(this); + + if (!spa_list_is_empty(&port->ready)) { + struct buffer *b = spa_list_first(&port->ready, struct buffer, link); + spa_list_remove(&b->link); + SPA_FLAG_SET(b->flags, BUFFER_FLAG_OUT); + + io->buffer_id = b->id; + io->status = SPA_STATUS_HAVE_DATA; + } + spa_node_call_ready(&this->callbacks, res); +} + +static int impl_node_send_command(void *object, const struct spa_command *command) +{ + struct impl *this = object; + struct port *port; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(command != NULL, -EINVAL); + + port = &this->port; + + switch (SPA_NODE_COMMAND_ID(command)) { + case SPA_NODE_COMMAND_Start: + { + struct timespec now; + + if (!port->have_format) + return -EIO; + if (port->n_buffers == 0) + return -EIO; + + if (this->started) + return 0; + + clock_gettime(CLOCK_MONOTONIC, &now); + if (this->props.live) + this->start_time = SPA_TIMESPEC_TO_NSEC(&now); + else + this->start_time = 0; + this->frame_count = 0; + this->elapsed_time = 0; + + this->started = true; + set_timer(this, true); + spa_vulkan_start(&this->state); + break; + } + case SPA_NODE_COMMAND_Suspend: + case SPA_NODE_COMMAND_Pause: + if (!this->started) + return 0; + + this->started = false; + set_timer(this, false); + spa_vulkan_stop(&this->state); + break; + default: + return -ENOTSUP; + } + return 0; +} + +static const struct spa_dict_item node_info_items[] = { + { SPA_KEY_MEDIA_CLASS, "Video/Source" }, + { SPA_KEY_NODE_DRIVER, "true" }, +}; + +static void emit_node_info(struct impl *this, bool full) +{ + uint64_t old = full ? this->info.change_mask : 0; + if (full) + this->info.change_mask = this->info_all; + if (this->info.change_mask) { + this->info.props = &SPA_DICT_INIT_ARRAY(node_info_items); + spa_node_emit_info(&this->hooks, &this->info); + this->info.change_mask = old; + } +} + +static void emit_port_info(struct impl *this, struct port *port, bool full) +{ + uint64_t old = full ? 
port->info.change_mask : 0; + if (full) + port->info.change_mask = port->info_all; + if (port->info.change_mask) { + struct spa_dict_item items[1]; + + items[0] = SPA_DICT_ITEM_INIT(SPA_KEY_FORMAT_DSP, "32 bit float RGBA video"); + port->info.props = &SPA_DICT_INIT(items, 1); + spa_node_emit_port_info(&this->hooks, + SPA_DIRECTION_OUTPUT, 0, &port->info); + port->info.change_mask = old; + } +} + +static int +impl_node_add_listener(void *object, + struct spa_hook *listener, + const struct spa_node_events *events, + void *data) +{ + struct impl *this = object; + struct spa_hook_list save; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + spa_hook_list_isolate(&this->hooks, &save, listener, events, data); + + emit_node_info(this, true); + emit_port_info(this, &this->port, true); + + spa_hook_list_join(&this->hooks, &save); + + return 0; +} + +static int +impl_node_set_callbacks(void *object, + const struct spa_node_callbacks *callbacks, + void *data) +{ + struct impl *this = object; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + this->callbacks = SPA_CALLBACKS_INIT(callbacks, data); + + return 0; +} + +static int impl_node_add_port(void *object, enum spa_direction direction, uint32_t port_id, + const struct spa_dict *props) +{ + return -ENOTSUP; +} + +static int +impl_node_remove_port(void *object, enum spa_direction direction, uint32_t port_id) +{ + return -ENOTSUP; +} + +static int port_enum_formats(void *object, + enum spa_direction direction, uint32_t port_id, + uint32_t index, + const struct spa_pod *filter, + struct spa_pod **param, + struct spa_pod_builder *builder) +{ + switch (index) { + case 0: + *param = spa_pod_builder_add_object(builder, + SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat, + SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), + SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_dsp), + SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_DSP_F32)); + break; + default: + return 0; + } + return 1; +} + +static int +impl_node_port_enum_params(void *object, int seq, + enum spa_direction direction, uint32_t port_id, + uint32_t id, uint32_t start, uint32_t num, + const struct spa_pod *filter) +{ + struct impl *this = object; + struct port *port; + struct spa_pod_builder b = { 0 }; + uint8_t buffer[1024]; + struct spa_pod *param; + struct spa_result_node_params result; + uint32_t count = 0; + int res; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(num != 0, -EINVAL); + + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port; + + result.id = id; + result.next = start; + next: + result.index = result.next++; + + spa_pod_builder_init(&b, buffer, sizeof(buffer)); + + switch (id) { + case SPA_PARAM_EnumFormat: + if ((res = port_enum_formats(this, direction, port_id, + result.index, filter, ¶m, &b)) <= 0) + return res; + break; + + case SPA_PARAM_Format: + if (!port->have_format) + return -EIO; + if (result.index > 0) + return 0; + + param = spa_format_video_dsp_build(&b, id, &port->current_format.info.dsp); + break; + + case SPA_PARAM_Buffers: + { + if (!port->have_format) + return -EIO; + if (this->position == NULL) + return -EIO; + if (result.index > 0) + return 0; + + spa_log_debug(this->log, NAME" %p: %dx%d stride %d", this, + this->position->video.size.width, + this->position->video.size.height, + this->position->video.stride); + + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_ParamBuffers, id, + SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(2, 1, MAX_BUFFERS), + 
SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1), + SPA_PARAM_BUFFERS_size, SPA_POD_Int(this->position->video.stride * + this->position->video.size.height), + SPA_PARAM_BUFFERS_stride, SPA_POD_Int(this->position->video.stride)); + break; + } + case SPA_PARAM_Meta: + switch (result.index) { + case 0: + param = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_ParamMeta, id, + SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header), + SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header))); + break; + + default: + return 0; + } + break; + default: + return -ENOENT; + } + + if (spa_pod_filter(&b, &result.param, param, filter) < 0) + goto next; + + spa_node_emit_result(&this->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result); + + if (++count != num) + goto next; + + return 0; +} + +static int clear_buffers(struct impl *this, struct port *port) +{ + if (port->n_buffers > 0) { + spa_log_debug(this->log, NAME " %p: clear buffers", this); + spa_vulkan_use_buffers(&this->state, &this->state.streams[0], 0, 0, NULL); + port->n_buffers = 0; + spa_list_init(&port->empty); + spa_list_init(&port->ready); + this->started = false; + set_timer(this, false); + } + return 0; +} + +static int port_set_format(struct impl *this, struct port *port, + uint32_t flags, + const struct spa_pod *format) +{ + int res; + + if (format == NULL) { + port->have_format = false; + clear_buffers(this, port); + spa_vulkan_unprepare(&this->state); + } else { + struct spa_video_info info = { 0 }; + + if ((res = spa_format_parse(format, &info.media_type, &info.media_subtype)) < 0) + return res; + + if (info.media_type != SPA_MEDIA_TYPE_video && + info.media_subtype != SPA_MEDIA_SUBTYPE_dsp) + return -EINVAL; + + if (spa_format_video_dsp_parse(format, &info.info.dsp) < 0) + return -EINVAL; + + if (info.info.dsp.format != SPA_VIDEO_FORMAT_DSP_F32) + return -EINVAL; + + this->state.constants.width = this->position->video.size.width; + this->state.constants.height = this->position->video.size.height; + + port->current_format = info; + port->have_format = true; + spa_vulkan_prepare(&this->state); + } + + port->info.change_mask |= SPA_PORT_CHANGE_MASK_PARAMS; + if (port->have_format) { + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_READWRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ); + } else { + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0); + } + emit_port_info(this, port, false); + + return 0; +} + +static int +impl_node_port_set_param(void *object, + enum spa_direction direction, uint32_t port_id, + uint32_t id, uint32_t flags, + const struct spa_pod *param) +{ + struct impl *this = object; + struct port *port; + int res; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(node, direction, port_id), -EINVAL); + port = &this->port; + + switch (id) { + case SPA_PARAM_Format: + res = port_set_format(this, port, flags, param); + break; + default: + return -ENOENT; + } + return res; +} + +static int +impl_node_port_use_buffers(void *object, + enum spa_direction direction, + uint32_t port_id, + uint32_t flags, + struct spa_buffer **buffers, + uint32_t n_buffers) +{ + struct impl *this = object; + struct port *port; + uint32_t i; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port; + + clear_buffers(this, port); + + if (n_buffers > 0 && !port->have_format) + return -EIO; + if 
(n_buffers > MAX_BUFFERS) + return -ENOSPC; + + for (i = 0; i < n_buffers; i++) { + struct buffer *b; + + b = &port->buffers[i]; + b->id = i; + b->outbuf = buffers[i]; + b->flags = 0; + b->h = spa_buffer_find_meta_data(buffers[i], SPA_META_Header, sizeof(*b->h)); + + spa_list_append(&port->empty, &b->link); + } + spa_vulkan_use_buffers(&this->state, &this->state.streams[0], flags, n_buffers, buffers); + port->n_buffers = n_buffers; + + return 0; +} + +static int +impl_node_port_set_io(void *object, + enum spa_direction direction, + uint32_t port_id, + uint32_t id, + void *data, size_t size) +{ + struct impl *this = object; + struct port *port; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(CHECK_PORT(this, direction, port_id), -EINVAL); + port = &this->port; + + switch (id) { + case SPA_IO_Buffers: + port->io = data; + break; + default: + return -ENOENT; + } + return 0; +} + +static int impl_node_port_reuse_buffer(void *object, uint32_t port_id, uint32_t buffer_id) +{ + struct impl *this = object; + struct port *port; + + spa_return_val_if_fail(this != NULL, -EINVAL); + spa_return_val_if_fail(port_id == 0, -EINVAL); + port = &this->port; + spa_return_val_if_fail(buffer_id < port->n_buffers, -EINVAL); + + reuse_buffer(this, port, buffer_id); + + return 0; +} + +static int impl_node_process(void *object) +{ + struct impl *this = object; + struct port *port; + struct spa_io_buffers *io; + + spa_return_val_if_fail(this != NULL, -EINVAL); + + port = &this->port; + if ((io = port->io) == NULL) + return -EIO; + + if (io->status == SPA_STATUS_HAVE_DATA) + return SPA_STATUS_HAVE_DATA; + + if (io->buffer_id < port->n_buffers) { + reuse_buffer(this, port, io->buffer_id); + io->buffer_id = SPA_ID_INVALID; + } + + if (!this->props.live) + return make_buffer(this); + else + return SPA_STATUS_OK; +} + +static const struct spa_node_methods impl_node = { + SPA_VERSION_NODE_METHODS, + .add_listener = impl_node_add_listener, + .set_callbacks = impl_node_set_callbacks, + .enum_params = impl_node_enum_params, + .set_param = impl_node_set_param, + .set_io = impl_node_set_io, + .send_command = impl_node_send_command, + .add_port = impl_node_add_port, + .remove_port = impl_node_remove_port, + .port_enum_params = impl_node_port_enum_params, + .port_set_param = impl_node_port_set_param, + .port_use_buffers = impl_node_port_use_buffers, + .port_set_io = impl_node_port_set_io, + .port_reuse_buffer = impl_node_port_reuse_buffer, + .process = impl_node_process, +}; + +static int impl_get_interface(struct spa_handle *handle, const char *type, void **interface) +{ + struct impl *this; + + spa_return_val_if_fail(handle != NULL, -EINVAL); + spa_return_val_if_fail(interface != NULL, -EINVAL); + + this = (struct impl *) handle; + + if (spa_streq(type, SPA_TYPE_INTERFACE_Node)) + *interface = &this->node; + else + return -ENOENT; + + return 0; +} + +static int do_remove_timer(struct spa_loop *loop, bool async, uint32_t seq, const void *data, size_t size, void *user_data) +{ + struct impl *this = user_data; + spa_loop_remove_source(this->data_loop, &this->timer_source); + return 0; +} + +static int impl_clear(struct spa_handle *handle) +{ + struct impl *this; + + spa_return_val_if_fail(handle != NULL, -EINVAL); + + this = (struct impl *) handle; + + if (this->data_loop) + spa_loop_invoke(this->data_loop, do_remove_timer, 0, NULL, 0, true, this); + spa_system_close(this->data_system, this->timer_source.fd); + + return 0; +} + +static size_t +impl_get_size(const struct spa_handle_factory *factory, + 
const struct spa_dict *params) +{ + return sizeof(struct impl); +} + +static int +impl_init(const struct spa_handle_factory *factory, + struct spa_handle *handle, + const struct spa_dict *info, + const struct spa_support *support, + uint32_t n_support) +{ + struct impl *this; + struct port *port; + + spa_return_val_if_fail(factory != NULL, -EINVAL); + spa_return_val_if_fail(handle != NULL, -EINVAL); + + handle->get_interface = impl_get_interface; + handle->clear = impl_clear; + + this = (struct impl *) handle; + + this->log = spa_support_find(support, n_support, SPA_TYPE_INTERFACE_Log); + this->data_loop = spa_support_find(support, n_support, SPA_TYPE_INTERFACE_DataLoop); + this->data_system = spa_support_find(support, n_support, SPA_TYPE_INTERFACE_DataSystem); + + spa_hook_list_init(&this->hooks); + + this->node.iface = SPA_INTERFACE_INIT( + SPA_TYPE_INTERFACE_Node, + SPA_VERSION_NODE, + &impl_node, this); + + this->info_all = SPA_NODE_CHANGE_MASK_FLAGS | + SPA_NODE_CHANGE_MASK_PROPS | + SPA_NODE_CHANGE_MASK_PARAMS; + this->info = SPA_NODE_INFO_INIT(); + this->info.max_output_ports = 1; + this->info.flags = SPA_NODE_FLAG_RT; + this->params[0] = SPA_PARAM_INFO(SPA_PARAM_PropInfo, SPA_PARAM_INFO_READ); + this->params[1] = SPA_PARAM_INFO(SPA_PARAM_Props, SPA_PARAM_INFO_READWRITE); + this->info.params = this->params; + this->info.n_params = 2; + reset_props(&this->props); + + this->timer_source.func = on_output; + this->timer_source.data = this; + this->timer_source.fd = spa_system_timerfd_create(this->data_system, CLOCK_MONOTONIC, + SPA_FD_CLOEXEC | SPA_FD_NONBLOCK); + this->timer_source.mask = SPA_IO_IN; + this->timer_source.rmask = 0; + this->timerspec.it_value.tv_sec = 0; + this->timerspec.it_value.tv_nsec = 0; + this->timerspec.it_interval.tv_sec = 0; + this->timerspec.it_interval.tv_nsec = 0; + + if (this->data_loop) + spa_loop_add_source(this->data_loop, &this->timer_source); + + port = &this->port; + port->info_all = SPA_PORT_CHANGE_MASK_FLAGS | + SPA_PORT_CHANGE_MASK_PARAMS | + SPA_PORT_CHANGE_MASK_PROPS; + port->info = SPA_PORT_INFO_INIT(); + port->info.flags = SPA_PORT_FLAG_NO_REF | SPA_PORT_FLAG_CAN_ALLOC_BUFFERS; + if (this->props.live) + port->info.flags |= SPA_PORT_FLAG_LIVE; + port->params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ); + port->params[1] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ); + port->params[2] = SPA_PARAM_INFO(SPA_PARAM_IO, SPA_PARAM_INFO_READ); + port->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE); + port->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0); + port->info.params = port->params; + port->info.n_params = 5; + spa_list_init(&port->empty); + spa_list_init(&port->ready); + + this->state.log = this->log; + spa_vulkan_init_stream(&this->state, &this->state.streams[0], + SPA_DIRECTION_OUTPUT, NULL); + this->state.shaderName = "spa/plugins/vulkan/shaders/main.spv"; + this->state.n_streams = 1; + + return 0; +} + +static const struct spa_interface_info impl_interfaces[] = { + {SPA_TYPE_INTERFACE_Node,}, +}; + +static int +impl_enum_interface_info(const struct spa_handle_factory *factory, + const struct spa_interface_info **info, + uint32_t *index) +{ + spa_return_val_if_fail(factory != NULL, -EINVAL); + spa_return_val_if_fail(info != NULL, -EINVAL); + spa_return_val_if_fail(index != NULL, -EINVAL); + + switch (*index) { + case 0: + *info = &impl_interfaces[*index]; + break; + default: + return 0; + } + (*index)++; + return 1; +} + +static const struct spa_dict_item info_items[] = { + { 
SPA_KEY_FACTORY_AUTHOR, "Wim Taymans " }, + { SPA_KEY_FACTORY_DESCRIPTION, "Generate video frames using a vulkan compute shader" }, +}; + +static const struct spa_dict info = SPA_DICT_INIT_ARRAY(info_items); + +const struct spa_handle_factory spa_vulkan_compute_source_factory = { + SPA_VERSION_HANDLE_FACTORY, + SPA_NAME_API_VULKAN_COMPUTE_SOURCE, + &info, + impl_get_size, + impl_init, + impl_enum_interface_info, +}; diff --git a/spa/plugins/vulkan/vulkan-utils.c b/spa/plugins/vulkan/vulkan-utils.c new file mode 100644 index 0000000..ae3337b --- /dev/null +++ b/spa/plugins/vulkan/vulkan-utils.c @@ -0,0 +1,758 @@ +#include + +#include +#include +#include +#include +#include +#include +#if !defined(__FreeBSD__) && !defined(__MidnightBSD__) +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "vulkan-utils.h" + +//#define ENABLE_VALIDATION + +#define VULKAN_INSTANCE_FUNCTION(name) \ + PFN_##name name = (PFN_##name)vkGetInstanceProcAddr(s->instance, #name) + +static int vkresult_to_errno(VkResult result) +{ + switch (result) { + case VK_SUCCESS: + case VK_EVENT_SET: + case VK_EVENT_RESET: + return 0; + case VK_NOT_READY: + case VK_INCOMPLETE: + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: + return EBUSY; + case VK_TIMEOUT: + return ETIMEDOUT; + case VK_ERROR_OUT_OF_HOST_MEMORY: + case VK_ERROR_OUT_OF_DEVICE_MEMORY: + case VK_ERROR_MEMORY_MAP_FAILED: + case VK_ERROR_OUT_OF_POOL_MEMORY: + case VK_ERROR_FRAGMENTED_POOL: +#ifdef VK_ERROR_FRAGMENTATION_EXT + case VK_ERROR_FRAGMENTATION_EXT: +#endif + return ENOMEM; + case VK_ERROR_INITIALIZATION_FAILED: + return EIO; + case VK_ERROR_DEVICE_LOST: + case VK_ERROR_SURFACE_LOST_KHR: +#ifdef VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT + case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT: +#endif + return ENODEV; + case VK_ERROR_LAYER_NOT_PRESENT: + case VK_ERROR_EXTENSION_NOT_PRESENT: + case VK_ERROR_FEATURE_NOT_PRESENT: + return ENOENT; + case VK_ERROR_INCOMPATIBLE_DRIVER: + case VK_ERROR_FORMAT_NOT_SUPPORTED: + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: + return ENOTSUP; + case VK_ERROR_TOO_MANY_OBJECTS: + return ENFILE; + case VK_SUBOPTIMAL_KHR: + case VK_ERROR_OUT_OF_DATE_KHR: + return EIO; + case VK_ERROR_INVALID_EXTERNAL_HANDLE: + case VK_ERROR_INVALID_SHADER_NV: +#ifdef VK_ERROR_VALIDATION_FAILED_EXT + case VK_ERROR_VALIDATION_FAILED_EXT: +#endif +#ifdef VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT + case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: +#endif +#ifdef VK_ERROR_INVALID_DEVICE_ADDRESS_EXT + case VK_ERROR_INVALID_DEVICE_ADDRESS_EXT: +#endif + return EINVAL; +#ifdef VK_ERROR_NOT_PERMITTED_EXT + case VK_ERROR_NOT_PERMITTED_EXT: + return EPERM; +#endif + default: + return EIO; + } +} + +#define VK_CHECK_RESULT(f) \ +{ \ + VkResult _result = (f); \ + int _r = -vkresult_to_errno(_result); \ + if (_result != VK_SUCCESS) { \ + spa_log_error(s->log, "error: %d (%d %s)", _result, _r, spa_strerror(_r)); \ + return _r; \ + } \ +} +#define CHECK(f) \ +{ \ + int _res = (f); \ + if (_res < 0) \ + return _res; \ +} + +static int createInstance(struct vulkan_state *s) +{ + static const VkApplicationInfo applicationInfo = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = "PipeWire", + .applicationVersion = 0, + .pEngineName = "PipeWire Vulkan Engine", + .engineVersion = 0, + .apiVersion = VK_API_VERSION_1_1 + }; + static const char * const extensions[] = { + VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME + }; + static const char * const checkLayers[] 
= { +#ifdef ENABLE_VALIDATION + "VK_LAYER_KHRONOS_validation", +#endif + NULL + }; + uint32_t i, j, layerCount, n_layers = 0; + const char *layers[1]; + vkEnumerateInstanceLayerProperties(&layerCount, NULL); + + VkLayerProperties availableLayers[layerCount]; + vkEnumerateInstanceLayerProperties(&layerCount, availableLayers); + + for (i = 0; i < layerCount; i++) { + for (j = 0; j < SPA_N_ELEMENTS(checkLayers); j++) { + if (spa_streq(availableLayers[i].layerName, checkLayers[j])) + layers[n_layers++] = checkLayers[j]; + } + } + + const VkInstanceCreateInfo createInfo = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + .pApplicationInfo = &applicationInfo, + .enabledExtensionCount = 1, + .ppEnabledExtensionNames = extensions, + .enabledLayerCount = n_layers, + .ppEnabledLayerNames = layers, + }; + + VK_CHECK_RESULT(vkCreateInstance(&createInfo, NULL, &s->instance)); + + return 0; +} + +static uint32_t getComputeQueueFamilyIndex(struct vulkan_state *s) +{ + uint32_t i, queueFamilyCount; + VkQueueFamilyProperties *queueFamilies; + + vkGetPhysicalDeviceQueueFamilyProperties(s->physicalDevice, &queueFamilyCount, NULL); + + queueFamilies = alloca(queueFamilyCount * sizeof(VkQueueFamilyProperties)); + vkGetPhysicalDeviceQueueFamilyProperties(s->physicalDevice, &queueFamilyCount, queueFamilies); + + for (i = 0; i < queueFamilyCount; i++) { + VkQueueFamilyProperties props = queueFamilies[i]; + + if (props.queueCount > 0 && (props.queueFlags & VK_QUEUE_COMPUTE_BIT)) + break; + } + if (i == queueFamilyCount) + return -ENODEV; + + return i; +} + +static int findPhysicalDevice(struct vulkan_state *s) +{ + uint32_t deviceCount; + VkPhysicalDevice *devices; + + vkEnumeratePhysicalDevices(s->instance, &deviceCount, NULL); + if (deviceCount == 0) + return -ENODEV; + + devices = alloca(deviceCount * sizeof(VkPhysicalDevice)); + vkEnumeratePhysicalDevices(s->instance, &deviceCount, devices); + + s->physicalDevice = devices[0]; + + s->queueFamilyIndex = getComputeQueueFamilyIndex(s); + + return 0; +} + +static int createDevice(struct vulkan_state *s) +{ + + const VkDeviceQueueCreateInfo queueCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueFamilyIndex = s->queueFamilyIndex, + .queueCount = 1, + .pQueuePriorities = (const float[]) { 1.0f } + }; + static const char * const extensions[] = { + VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME + }; + const VkDeviceCreateInfo deviceCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &queueCreateInfo, + .enabledExtensionCount = 2, + .ppEnabledExtensionNames = extensions, + }; + + VK_CHECK_RESULT(vkCreateDevice(s->physicalDevice, &deviceCreateInfo, NULL, &s->device)); + + vkGetDeviceQueue(s->device, s->queueFamilyIndex, 0, &s->queue); + + static const VkFenceCreateInfo fenceCreateInfo = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = 0, + }; + VK_CHECK_RESULT(vkCreateFence(s->device, &fenceCreateInfo, NULL, &s->fence)); + + return 0; +} + +static uint32_t findMemoryType(struct vulkan_state *s, + uint32_t memoryTypeBits, VkMemoryPropertyFlags properties) +{ + uint32_t i; + VkPhysicalDeviceMemoryProperties memoryProperties; + + vkGetPhysicalDeviceMemoryProperties(s->physicalDevice, &memoryProperties); + + for (i = 0; i < memoryProperties.memoryTypeCount; i++) { + if ((memoryTypeBits & (1 << i)) && + ((memoryProperties.memoryTypes[i].propertyFlags & properties) == properties)) + return i; + } + return -1; +} + +static int 
createDescriptors(struct vulkan_state *s) +{ + uint32_t i; + + VkDescriptorPoolSize descriptorPoolSizes[2] = { + { + .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .descriptorCount = 1, + }, + { + .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = s->n_streams - 1, + }, + }; + const VkDescriptorPoolCreateInfo descriptorPoolCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .maxSets = s->n_streams, + .poolSizeCount = s->n_streams > 1 ? 2 : 1, + .pPoolSizes = descriptorPoolSizes, + }; + + VK_CHECK_RESULT(vkCreateDescriptorPool(s->device, + &descriptorPoolCreateInfo, NULL, + &s->descriptorPool)); + + VkDescriptorSetLayoutBinding descriptorSetLayoutBinding[s->n_streams]; + descriptorSetLayoutBinding[0] = (VkDescriptorSetLayoutBinding) { + .binding = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT + }; + for (i = 1; i < s->n_streams; i++) { + descriptorSetLayoutBinding[i] = (VkDescriptorSetLayoutBinding) { + .binding = i, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT + }; + }; + const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = s->n_streams, + .pBindings = descriptorSetLayoutBinding + }; + VK_CHECK_RESULT(vkCreateDescriptorSetLayout(s->device, + &descriptorSetLayoutCreateInfo, NULL, + &s->descriptorSetLayout)); + + const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = s->descriptorPool, + .descriptorSetCount = 1, + .pSetLayouts = &s->descriptorSetLayout + }; + + VK_CHECK_RESULT(vkAllocateDescriptorSets(s->device, + &descriptorSetAllocateInfo, + &s->descriptorSet)); + + const VkSamplerCreateInfo samplerInfo = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = VK_FILTER_LINEAR, + .minFilter = VK_FILTER_LINEAR, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + .compareEnable = VK_FALSE, + .compareOp = VK_COMPARE_OP_ALWAYS, + .mipLodBias = 0.0f, + .minLod = 0, + .maxLod = 5, + }; + VK_CHECK_RESULT(vkCreateSampler(s->device, &samplerInfo, NULL, &s->sampler)); + + return 0; +} + +static int updateDescriptors(struct vulkan_state *s) +{ + uint32_t i; + VkDescriptorImageInfo descriptorImageInfo[s->n_streams]; + VkWriteDescriptorSet writeDescriptorSet[s->n_streams]; + + for (i = 0; i < s->n_streams; i++) { + struct vulkan_stream *p = &s->streams[i]; + + if (p->current_buffer_id == p->pending_buffer_id || + p->pending_buffer_id == SPA_ID_INVALID) + continue; + + p->current_buffer_id = p->pending_buffer_id; + p->busy_buffer_id = p->current_buffer_id; + p->pending_buffer_id = SPA_ID_INVALID; + + descriptorImageInfo[i] = (VkDescriptorImageInfo) { + .sampler = s->sampler, + .imageView = p->buffers[p->current_buffer_id].view, + .imageLayout = VK_IMAGE_LAYOUT_GENERAL, + }; + writeDescriptorSet[i] = (VkWriteDescriptorSet) { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = s->descriptorSet, + .dstBinding = i, + .descriptorCount = 1, + .descriptorType = i == 0 ? 
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE : + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .pImageInfo = &descriptorImageInfo[i], + }; + } + vkUpdateDescriptorSets(s->device, s->n_streams, + writeDescriptorSet, 0, NULL); + + return 0; +} + +static VkShaderModule createShaderModule(struct vulkan_state *s, const char* shaderFile) +{ + VkShaderModule shaderModule = VK_NULL_HANDLE; + VkResult result; + void *data; + int fd; + struct stat stat; + + if ((fd = open(shaderFile, 0, O_RDONLY)) == -1) { + spa_log_error(s->log, "can't open %s: %m", shaderFile); + return VK_NULL_HANDLE; + } + if (fstat(fd, &stat) < 0) { + spa_log_error(s->log, "can't stat %s: %m", shaderFile); + close(fd); + return VK_NULL_HANDLE; + } + + data = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + + const VkShaderModuleCreateInfo shaderModuleCreateInfo = { + .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + .codeSize = stat.st_size, + .pCode = data, + }; + result = vkCreateShaderModule(s->device, + &shaderModuleCreateInfo, 0, &shaderModule); + + munmap(data, stat.st_size); + close(fd); + + if (result != VK_SUCCESS) { + spa_log_error(s->log, "can't create shader %s: %m", shaderFile); + return VK_NULL_HANDLE; + } + return shaderModule; +} + +static int createComputePipeline(struct vulkan_state *s, const char *shader_file) +{ + static const VkPushConstantRange range = { + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .offset = 0, + .size = sizeof(struct push_constants) + }; + + const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + .setLayoutCount = 1, + .pSetLayouts = &s->descriptorSetLayout, + .pushConstantRangeCount = 1, + .pPushConstantRanges = &range, + }; + VK_CHECK_RESULT(vkCreatePipelineLayout(s->device, + &pipelineLayoutCreateInfo, NULL, + &s->pipelineLayout)); + + s->computeShaderModule = createShaderModule(s, shader_file); + if (s->computeShaderModule == VK_NULL_HANDLE) + return -ENOENT; + + const VkPipelineShaderStageCreateInfo shaderStageCreateInfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_COMPUTE_BIT, + .module = s->computeShaderModule, + .pName = "main", + }; + const VkComputePipelineCreateInfo pipelineCreateInfo = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = shaderStageCreateInfo, + .layout = s->pipelineLayout, + }; + VK_CHECK_RESULT(vkCreateComputePipelines(s->device, VK_NULL_HANDLE, + 1, &pipelineCreateInfo, NULL, + &s->pipeline)); + return 0; +} + +static int createCommandBuffer(struct vulkan_state *s) +{ + const VkCommandPoolCreateInfo commandPoolCreateInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + .queueFamilyIndex = s->queueFamilyIndex, + }; + VK_CHECK_RESULT(vkCreateCommandPool(s->device, + &commandPoolCreateInfo, NULL, + &s->commandPool)); + + const VkCommandBufferAllocateInfo commandBufferAllocateInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = s->commandPool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }; + VK_CHECK_RESULT(vkAllocateCommandBuffers(s->device, + &commandBufferAllocateInfo, + &s->commandBuffer)); + + return 0; +} + +static int runCommandBuffer(struct vulkan_state *s) +{ + static const VkCommandBufferBeginInfo beginInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + VK_CHECK_RESULT(vkBeginCommandBuffer(s->commandBuffer, 
&beginInfo)); + + VkImageMemoryBarrier barrier[s->n_streams]; + uint32_t i; + + for (i = 0; i < s->n_streams; i++) { + struct vulkan_stream *p = &s->streams[i]; + + barrier[i]= (VkImageMemoryBarrier) { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.levelCount = 1, + .subresourceRange.layerCount = 1, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_GENERAL, + .srcAccessMask = 0, + .dstAccessMask = 0, + .image = p->buffers[p->current_buffer_id].image, + }; + } + + vkCmdPipelineBarrier(s->commandBuffer, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + 0, 0, NULL, 0, NULL, + s->n_streams, barrier); + + vkCmdBindPipeline(s->commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, s->pipeline); + vkCmdPushConstants (s->commandBuffer, + s->pipelineLayout, VK_SHADER_STAGE_COMPUTE_BIT, + 0, sizeof(struct push_constants), (const void *) &s->constants); + vkCmdBindDescriptorSets(s->commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, + s->pipelineLayout, 0, 1, &s->descriptorSet, 0, NULL); + + vkCmdDispatch(s->commandBuffer, + (uint32_t)ceil(s->constants.width / (float)WORKGROUP_SIZE), + (uint32_t)ceil(s->constants.height / (float)WORKGROUP_SIZE), 1); + + VK_CHECK_RESULT(vkEndCommandBuffer(s->commandBuffer)); + + VK_CHECK_RESULT(vkResetFences(s->device, 1, &s->fence)); + + const VkSubmitInfo submitInfo = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &s->commandBuffer, + }; + VK_CHECK_RESULT(vkQueueSubmit(s->queue, 1, &submitInfo, s->fence)); + s->started = true; + + return 0; +} + +static void clear_buffers(struct vulkan_state *s, struct vulkan_stream *p) +{ + uint32_t i; + + for (i = 0; i < p->n_buffers; i++) { + if (p->buffers[i].fd != -1) + close(p->buffers[i].fd); + vkFreeMemory(s->device, p->buffers[i].memory, NULL); + vkDestroyImage(s->device, p->buffers[i].image, NULL); + vkDestroyImageView(s->device, p->buffers[i].view, NULL); + } + p->n_buffers = 0; +} + +static void clear_streams(struct vulkan_state *s) +{ + uint32_t i; + for (i = 0; i < s->n_streams; i++) { + struct vulkan_stream *p = &s->streams[i]; + clear_buffers(s, p); + } +} + +int spa_vulkan_use_buffers(struct vulkan_state *s, struct vulkan_stream *p, uint32_t flags, + uint32_t n_buffers, struct spa_buffer **buffers) +{ + uint32_t i; + VULKAN_INSTANCE_FUNCTION(vkGetMemoryFdKHR); + + clear_buffers(s, p); + + for (i = 0; i < n_buffers; i++) { + VkExternalMemoryImageCreateInfo extInfo; + VkImageCreateInfo imageCreateInfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .format = VK_FORMAT_R32G32B32A32_SFLOAT, + .extent.width = s->constants.width, + .extent.height = s->constants.height, + .extent.depth = 1, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = VK_IMAGE_TILING_LINEAR, + .usage = p->direction == SPA_DIRECTION_OUTPUT ? 
+ VK_IMAGE_USAGE_STORAGE_BIT: + VK_IMAGE_USAGE_SAMPLED_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + + if (!(flags & SPA_NODE_BUFFERS_FLAG_ALLOC)) { + extInfo = (VkExternalMemoryImageCreateInfo) { + .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, + }; + imageCreateInfo.pNext = &extInfo; + } + + VK_CHECK_RESULT(vkCreateImage(s->device, + &imageCreateInfo, NULL, &p->buffers[i].image)); + + VkMemoryRequirements memoryRequirements; + vkGetImageMemoryRequirements(s->device, + p->buffers[i].image, &memoryRequirements); + + VkMemoryAllocateInfo allocateInfo = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = memoryRequirements.size, + .memoryTypeIndex = findMemoryType(s, + memoryRequirements.memoryTypeBits, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT), + }; + + if (flags & SPA_NODE_BUFFERS_FLAG_ALLOC) { + VK_CHECK_RESULT(vkAllocateMemory(s->device, + &allocateInfo, NULL, &p->buffers[i].memory)); + + const VkMemoryGetFdInfoKHR getFdInfo = { + .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, + .memory = p->buffers[i].memory, + .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT + }; + int fd; + + VK_CHECK_RESULT(vkGetMemoryFdKHR(s->device, &getFdInfo, &fd)); + + spa_log_info(s->log, "export DMABUF %zd", memoryRequirements.size); + +// buffers[i]->datas[0].type = SPA_DATA_DmaBuf; + buffers[i]->datas[0].type = SPA_DATA_MemFd; + buffers[i]->datas[0].fd = fd; + buffers[i]->datas[0].flags = SPA_DATA_FLAG_READABLE; + buffers[i]->datas[0].mapoffset = 0; + buffers[i]->datas[0].maxsize = memoryRequirements.size; + p->buffers[i].fd = fd; + } else { + VkImportMemoryFdInfoKHR importInfo = { + .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, + .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, + .fd = fcntl(buffers[i]->datas[0].fd, F_DUPFD_CLOEXEC, 0), + }; + allocateInfo.pNext = &importInfo; + p->buffers[i].fd = -1; + spa_log_info(s->log, "import DMABUF"); + + VK_CHECK_RESULT(vkAllocateMemory(s->device, + &allocateInfo, NULL, &p->buffers[i].memory)); + } + VK_CHECK_RESULT(vkBindImageMemory(s->device, + p->buffers[i].image, p->buffers[i].memory, 0)); + + VkImageViewCreateInfo viewInfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = p->buffers[i].image, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = VK_FORMAT_R32G32B32A32_SFLOAT, + .components.r = VK_COMPONENT_SWIZZLE_R, + .components.g = VK_COMPONENT_SWIZZLE_G, + .components.b = VK_COMPONENT_SWIZZLE_B, + .components.a = VK_COMPONENT_SWIZZLE_A, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.levelCount = 1, + .subresourceRange.layerCount = 1, + }; + + VK_CHECK_RESULT(vkCreateImageView(s->device, + &viewInfo, NULL, &p->buffers[i].view)); + } + p->n_buffers = n_buffers; + + return 0; +} + +int spa_vulkan_init_stream(struct vulkan_state *s, struct vulkan_stream *stream, + enum spa_direction direction, struct spa_dict *props) +{ + spa_zero(*stream); + stream->direction = direction; + stream->current_buffer_id = SPA_ID_INVALID; + stream->busy_buffer_id = SPA_ID_INVALID; + stream->ready_buffer_id = SPA_ID_INVALID; + return 0; +} + +int spa_vulkan_prepare(struct vulkan_state *s) +{ + if (!s->prepared) { + CHECK(createInstance(s)); + CHECK(findPhysicalDevice(s)); + CHECK(createDevice(s)); + CHECK(createDescriptors(s)); + CHECK(createComputePipeline(s, s->shaderName)); + CHECK(createCommandBuffer(s)); 
+ s->prepared = true; + } + return 0; +} + +int spa_vulkan_unprepare(struct vulkan_state *s) +{ + if (s->prepared) { + vkDestroyShaderModule(s->device, s->computeShaderModule, NULL); + vkDestroySampler(s->device, s->sampler, NULL); + vkDestroyDescriptorPool(s->device, s->descriptorPool, NULL); + vkDestroyDescriptorSetLayout(s->device, s->descriptorSetLayout, NULL); + vkDestroyPipelineLayout(s->device, s->pipelineLayout, NULL); + vkDestroyPipeline(s->device, s->pipeline, NULL); + vkDestroyCommandPool(s->device, s->commandPool, NULL); + vkDestroyFence(s->device, s->fence, NULL); + vkDestroyDevice(s->device, NULL); + vkDestroyInstance(s->instance, NULL); + s->prepared = false; + } + return 0; +} + +int spa_vulkan_start(struct vulkan_state *s) +{ + uint32_t i; + + for (i = 0; i < s->n_streams; i++) { + struct vulkan_stream *p = &s->streams[i]; + p->current_buffer_id = SPA_ID_INVALID; + p->busy_buffer_id = SPA_ID_INVALID; + p->ready_buffer_id = SPA_ID_INVALID; + } + return 0; +} + +int spa_vulkan_stop(struct vulkan_state *s) +{ + VK_CHECK_RESULT(vkDeviceWaitIdle(s->device)); + clear_streams(s); + s->started = false; + return 0; +} + +int spa_vulkan_ready(struct vulkan_state *s) +{ + uint32_t i; + VkResult result; + + if (!s->started) + return 0; + + result = vkGetFenceStatus(s->device, s->fence); + if (result == VK_NOT_READY) + return -EBUSY; + VK_CHECK_RESULT(result); + + s->started = false; + + for (i = 0; i < s->n_streams; i++) { + struct vulkan_stream *p = &s->streams[i]; + p->ready_buffer_id = p->busy_buffer_id; + p->busy_buffer_id = SPA_ID_INVALID; + } + return 0; +} + +int spa_vulkan_process(struct vulkan_state *s) +{ + CHECK(updateDescriptors(s)); + CHECK(runCommandBuffer(s)); + VK_CHECK_RESULT(vkDeviceWaitIdle(s->device)); + + return 0; +} diff --git a/spa/plugins/vulkan/vulkan-utils.h b/spa/plugins/vulkan/vulkan-utils.h new file mode 100644 index 0000000..c818322 --- /dev/null +++ b/spa/plugins/vulkan/vulkan-utils.h @@ -0,0 +1,86 @@ +#include + +#include +#include + +#define MAX_STREAMS 2 +#define MAX_BUFFERS 16 +#define WORKGROUP_SIZE 32 + +struct pixel { + float r, g, b, a; +}; + +struct push_constants { + float time; + int frame; + int width; + int height; +}; + +struct vulkan_buffer { + int fd; + VkImage image; + VkImageView view; + VkDeviceMemory memory; +}; + +struct vulkan_stream { + enum spa_direction direction; + + uint32_t pending_buffer_id; + uint32_t current_buffer_id; + uint32_t busy_buffer_id; + uint32_t ready_buffer_id; + + struct vulkan_buffer buffers[MAX_BUFFERS]; + uint32_t n_buffers; +}; + +struct vulkan_state { + struct spa_log *log; + + struct push_constants constants; + + VkInstance instance; + + VkPhysicalDevice physicalDevice; + VkDevice device; + + VkPipeline pipeline; + VkPipelineLayout pipelineLayout; + const char *shaderName; + VkShaderModule computeShaderModule; + + VkCommandPool commandPool; + VkCommandBuffer commandBuffer; + + VkQueue queue; + uint32_t queueFamilyIndex; + VkFence fence; + unsigned int prepared:1; + unsigned int started:1; + + VkDescriptorPool descriptorPool; + VkDescriptorSetLayout descriptorSetLayout; + + VkSampler sampler; + + uint32_t n_streams; + VkDescriptorSet descriptorSet; + struct vulkan_stream streams[MAX_STREAMS]; +}; + +int spa_vulkan_init_stream(struct vulkan_state *s, struct vulkan_stream *stream, enum spa_direction, + struct spa_dict *props); + +int spa_vulkan_prepare(struct vulkan_state *s); +int spa_vulkan_use_buffers(struct vulkan_state *s, struct vulkan_stream *stream, uint32_t flags, + uint32_t n_buffers, struct 
spa_buffer **buffers); +int spa_vulkan_unprepare(struct vulkan_state *s); + +int spa_vulkan_start(struct vulkan_state *s); +int spa_vulkan_stop(struct vulkan_state *s); +int spa_vulkan_ready(struct vulkan_state *s); +int spa_vulkan_process(struct vulkan_state *s); +int spa_vulkan_cleanup(struct vulkan_state *s); -- cgit v1.2.3
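
A minimal usage sketch of the vulkan-utils API declared above, following the call order suggested by the declarations in vulkan-utils.h and by the call sites in the compute-source node added in this patch (prepare, per-stream init, buffer import, start, process, fence polling via ready, stop, unprepare). The run_one_frame wrapper, its logger and buffer arguments, and the 640x480 frame size are illustrative assumptions and are not part of the patch; only the spa_vulkan_* calls and struct fields come from the code above.

/*
 * Usage sketch (assumptions: run_one_frame, its arguments and the frame
 * size are illustrative; the buffers are expected to already carry valid
 * dmabuf/memfd descriptors, as spa_vulkan_use_buffers() with flags == 0
 * takes the import path).
 */
#include <errno.h>
#include <vulkan/vulkan.h>

#include <spa/utils/defs.h>
#include <spa/support/log.h>
#include <spa/buffer/buffer.h>

#include "vulkan-utils.h"

static int run_one_frame(struct spa_log *log, struct spa_buffer **bufs, uint32_t n_bufs)
{
	struct vulkan_state state;
	int res;

	spa_zero(state);
	state.log = log;
	state.shaderName = "spa/plugins/vulkan/shaders/main.spv"; /* shader used by the source node */
	state.n_streams = 1;
	state.constants.width = 640;  /* assumed frame size; must be set before use_buffers() */
	state.constants.height = 480;

	/* one output stream, as in vulkan-compute-source.c */
	spa_vulkan_init_stream(&state, &state.streams[0], SPA_DIRECTION_OUTPUT, NULL);

	/* creates instance, device, descriptors, pipeline and command buffer */
	if ((res = spa_vulkan_prepare(&state)) < 0)
		return res;

	/* import the already-negotiated buffers into the output stream */
	if ((res = spa_vulkan_use_buffers(&state, &state.streams[0], 0, n_bufs, bufs)) < 0)
		goto done;

	spa_vulkan_start(&state);
	state.streams[0].pending_buffer_id = 0; /* render into buffer 0 */

	/* record and submit the compute job, then poll the fence */
	if ((res = spa_vulkan_process(&state)) < 0)
		goto done;
	while ((res = spa_vulkan_ready(&state)) == -EBUSY)
		;
	spa_vulkan_stop(&state);
done:
	spa_vulkan_unprepare(&state);
	return res;
}

In the node itself the buffer ids move from pending to current/busy in spa_vulkan_process() and from busy to ready in spa_vulkan_ready(), which is why the loop above only has to wait for the fence before reusing or dequeuing a buffer.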