summaryrefslogtreecommitdiffstats
path: root/src/examples
diff options
context:
space:
mode:
Diffstat (limited to 'src/examples')
-rw-r--r--src/examples/audio-capture.c209
-rw-r--r--src/examples/audio-dsp-filter.c180
-rw-r--r--src/examples/audio-dsp-src.c165
-rw-r--r--src/examples/audio-src.c186
-rw-r--r--src/examples/bluez-session.c400
-rw-r--r--src/examples/export-sink.c584
-rw-r--r--src/examples/export-source.c566
-rw-r--r--src/examples/export-spa-device.c144
-rw-r--r--src/examples/export-spa.c183
-rw-r--r--src/examples/local-v4l2.c469
-rw-r--r--src/examples/meson.build51
-rw-r--r--src/examples/sdl.h198
-rw-r--r--src/examples/video-dsp-play.c315
-rw-r--r--src/examples/video-play-fixate.c516
-rw-r--r--src/examples/video-play-pull.c588
-rw-r--r--src/examples/video-play-reneg.c438
-rw-r--r--src/examples/video-play.c529
-rw-r--r--src/examples/video-src-alloc.c464
-rw-r--r--src/examples/video-src-fixate.c602
-rw-r--r--src/examples/video-src-reneg.c509
-rw-r--r--src/examples/video-src.c357
21 files changed, 7653 insertions, 0 deletions
diff --git a/src/examples/audio-capture.c b/src/examples/audio-capture.c
new file mode 100644
index 0000000..4c1afbb
--- /dev/null
+++ b/src/examples/audio-capture.c
@@ -0,0 +1,209 @@
+/* PipeWire
+ *
+ * Copyright © 2022 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Audio capture using \ref pw_stream "pw_stream".
+ [title]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <math.h>
+#include <signal.h>
+
+#include <spa/param/audio/format-utils.h>
+
+#include <pipewire/pipewire.h>
+
/* Global state of the capture example. */
struct data {
	struct pw_main_loop *loop;	/* event loop driving the example */
	struct pw_stream *stream;	/* the capture stream */

	struct spa_audio_info format;	/* negotiated format, filled in param_changed */
	unsigned move:1;		/* set once we printed the meters and may move the cursor up */
};
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. consume stuff in the buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void on_process(void *userdata)
+{
+ struct data *data = userdata;
+ struct pw_buffer *b;
+ struct spa_buffer *buf;
+ float *samples, max;
+ uint32_t c, n, n_channels, n_samples, peak;
+
+ if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
+ pw_log_warn("out of buffers: %m");
+ return;
+ }
+
+ buf = b->buffer;
+ if ((samples = buf->datas[0].data) == NULL)
+ return;
+
+ n_channels = data->format.info.raw.channels;
+ n_samples = buf->datas[0].chunk->size / sizeof(float);
+
+ /* move cursor up */
+ if (data->move)
+ fprintf(stdout, "%c[%dA", 0x1b, n_channels + 1);
+ fprintf(stdout, "captured %d samples\n", n_samples / n_channels);
+ for (c = 0; c < data->format.info.raw.channels; c++) {
+ max = 0.0f;
+ for (n = c; n < n_samples; n += n_channels)
+ max = fmaxf(max, fabsf(samples[n]));
+
+ peak = SPA_CLAMP(max * 30, 0, 39);
+
+ fprintf(stdout, "channel %d: |%*s%*s| peak:%f\n",
+ c, peak+1, "*", 40 - peak, "", max);
+ }
+ data->move = true;
+ fflush(stdout);
+
+ pw_stream_queue_buffer(data->stream, b);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ */
/* Stream param_changed event. We only care about the Format param, which
 * carries the negotiated sample format of the capture stream.
 *
 * @param _data  our struct data
 * @param id     id of the changed param (SPA_PARAM_*)
 * @param param  the new param, or NULL when it is being cleared
 */
static void
on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
{
	struct data *data = _data;

	/* NULL means to clear the format */
	if (param == NULL || id != SPA_PARAM_Format)
		return;

	if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
		return;

	/* only accept raw audio */
	if (data->format.media_type != SPA_MEDIA_TYPE_audio ||
	    data->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
		return;

	/* call a helper function to parse the format for us. */
	spa_format_audio_raw_parse(param, &data->format.info.raw);

	/* on_process() reads rate/channels from data->format from here on */
	fprintf(stdout, "capturing rate:%d channels:%d\n",
			data->format.info.raw.rate, data->format.info.raw.channels);

}
+
/* The stream events we listen to: format negotiation and data processing. */
static const struct pw_stream_events stream_events = {
	PW_VERSION_STREAM_EVENTS,
	.param_changed = on_stream_param_changed,
	.process = on_process,
};
+
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_main_loop_quit(data->loop);
+}
+
/* Entry point: set up the main loop and the capture stream, run until
 * SIGINT/SIGTERM, then tear everything down.
 *
 * Usage: audio-capture [target-object]
 */
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[1];
	uint8_t buffer[1024];
	struct pw_properties *props;
	/* pod builder for the format param, backed by stack memory */
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	pw_init(&argc, &argv);

	/* make a main loop. If you already have another main loop, you can add
	 * the fd of this pipewire mainloop to it. */
	data.loop = pw_main_loop_new(NULL);

	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* Create a simple stream, the simple stream manages the core and remote
	 * objects for you if you don't need to deal with them.
	 *
	 * If you plan to autoconnect your stream, you need to provide at least
	 * media, category and role properties.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the stream state. The most important event
	 * you need to listen to is the process event where you need to produce
	 * the data.
	 */
	props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Audio",
			PW_KEY_MEDIA_CATEGORY, "Capture",
			PW_KEY_MEDIA_ROLE, "Music",
			NULL);
	if (argc > 1)
		/* Set stream target if given on command line */
		pw_properties_set(props, PW_KEY_TARGET_OBJECT, argv[1]);

	/* uncomment if you want to capture from the sink monitor ports */
	/* pw_properties_set(props, PW_KEY_STREAM_CAPTURE_SINK, "true"); */

	/* the stream takes ownership of props */
	data.stream = pw_stream_new_simple(
			pw_main_loop_get_loop(data.loop),
			"audio-capture",
			props,
			&stream_events,
			&data);

	/* Make one parameter with the supported formats. The SPA_PARAM_EnumFormat
	 * id means that this is a format enumeration (of 1 value).
	 * We leave the channels and rate empty to accept the native graph
	 * rate and channels. */
	params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat,
			&SPA_AUDIO_INFO_RAW_INIT(
				.format = SPA_AUDIO_FORMAT_F32));

	/* Now connect this stream. We ask that our process function is
	 * called in a realtime thread. */
	pw_stream_connect(data.stream,
			  PW_DIRECTION_INPUT,
			  PW_ID_ANY,
			  PW_STREAM_FLAG_AUTOCONNECT |
			  PW_STREAM_FLAG_MAP_BUFFERS |
			  PW_STREAM_FLAG_RT_PROCESS,
			  params, 1);

	/* and wait while we let things run */
	pw_main_loop_run(data.loop);

	pw_stream_destroy(data.stream);
	pw_main_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/audio-dsp-filter.c b/src/examples/audio-dsp-filter.c
new file mode 100644
index 0000000..fbcc226
--- /dev/null
+++ b/src/examples/audio-dsp-filter.c
@@ -0,0 +1,180 @@
+/* PipeWire
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Audio filter using \ref pw_filter "pw_filter".
+ [title]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <math.h>
+#include <signal.h>
+
+#include <spa/pod/builder.h>
+#include <spa/param/latency-utils.h>
+
+#include <pipewire/pipewire.h>
+#include <pipewire/filter.h>
+
struct data;

/* Per-port user data, allocated for us by pw_filter_add_port(). */
struct port {
	struct data *data;	/* back pointer to the global state */
};

/* Global state of the filter example. */
struct data {
	struct pw_main_loop *loop;	/* event loop driving the example */
	struct pw_filter *filter;	/* the filter node */
	struct port *in_port;		/* DSP input port */
	struct port *out_port;		/* DSP output port */
};
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * in = pw_filter_dequeue_buffer(filter, in_port);
+ * out = pw_filter_dequeue_buffer(filter, out_port);
+ *
+ * .. do stuff with buffers ...
+ *
+ * pw_filter_queue_buffer(filter, in_port, in);
+ * pw_filter_queue_buffer(filter, out_port, out);
+ *
+ * For DSP ports, there is a shortcut to directly dequeue, get
+ * the data and requeue the buffer with pw_filter_get_dsp_buffer().
+ *
+ *
+ */
+static void on_process(void *userdata, struct spa_io_position *position)
+{
+ struct data *data = userdata;
+ float *in, *out;
+ uint32_t n_samples = position->clock.duration;
+
+ pw_log_trace("do process %d", n_samples);
+
+ in = pw_filter_get_dsp_buffer(data->in_port, n_samples);
+ out = pw_filter_get_dsp_buffer(data->out_port, n_samples);
+
+ if (in == NULL || out == NULL)
+ return;
+
+ memcpy(out, in, n_samples * sizeof(float));
+}
+
/* Only the process event is of interest for this example. */
static const struct pw_filter_events filter_events = {
	PW_VERSION_FILTER_EVENTS,
	.process = on_process,
};

/* SIGINT/SIGTERM handler: quit the main loop so main() can clean up. */
static void do_quit(void *userdata, int signal_number)
{
	struct data *data = userdata;
	pw_main_loop_quit(data->loop);
}
+
/* Entry point: create a filter with one input and one output DSP port,
 * connect it and run until SIGINT/SIGTERM. */
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[1];
	uint8_t buffer[1024];
	/* pod builder for the ProcessLatency param, backed by stack memory */
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	pw_init(&argc, &argv);

	/* make a main loop. If you already have another main loop, you can add
	 * the fd of this pipewire mainloop to it. */
	data.loop = pw_main_loop_new(NULL);

	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* Create a simple filter, the simple filter manages the core and remote
	 * objects for you if you don't need to deal with them.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the filter state. The most important event
	 * you need to listen to is the process event where you need to process
	 * the data.
	 */
	data.filter = pw_filter_new_simple(
			pw_main_loop_get_loop(data.loop),
			"audio-filter",
			pw_properties_new(
				PW_KEY_MEDIA_TYPE, "Audio",
				PW_KEY_MEDIA_CATEGORY, "Filter",
				PW_KEY_MEDIA_ROLE, "DSP",
				NULL),
			&filter_events,
			&data);

	/* make an audio DSP input port */
	data.in_port = pw_filter_add_port(data.filter,
			PW_DIRECTION_INPUT,
			PW_FILTER_PORT_FLAG_MAP_BUFFERS,
			sizeof(struct port),
			pw_properties_new(
				PW_KEY_FORMAT_DSP, "32 bit float mono audio",
				PW_KEY_PORT_NAME, "input",
				NULL),
			NULL, 0);

	/* make an audio DSP output port */
	data.out_port = pw_filter_add_port(data.filter,
			PW_DIRECTION_OUTPUT,
			PW_FILTER_PORT_FLAG_MAP_BUFFERS,
			sizeof(struct port),
			pw_properties_new(
				PW_KEY_FORMAT_DSP, "32 bit float mono audio",
				PW_KEY_PORT_NAME, "output",
				NULL),
			NULL, 0);

	/* announce a 10ms processing latency for this filter */
	params[0] = spa_process_latency_build(&b,
			SPA_PARAM_ProcessLatency,
			&SPA_PROCESS_LATENCY_INFO_INIT(
				.ns = 10 * SPA_NSEC_PER_MSEC
			));


	/* Now connect this filter. We ask that our process function is
	 * called in a realtime thread. */
	if (pw_filter_connect(data.filter,
				PW_FILTER_FLAG_RT_PROCESS,
				params, 1) < 0) {
		fprintf(stderr, "can't connect\n");
		return -1;
	}

	/* and wait while we let things run */
	pw_main_loop_run(data.loop);

	pw_filter_destroy(data.filter);
	pw_main_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/audio-dsp-src.c b/src/examples/audio-dsp-src.c
new file mode 100644
index 0000000..d7bf523
--- /dev/null
+++ b/src/examples/audio-dsp-src.c
@@ -0,0 +1,165 @@
+/* PipeWire
+ *
+ * Copyright © 2020 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Audio source using \ref pw_filter "pw_filter"
+ [title]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <math.h>
+#include <signal.h>
+
+#include <pipewire/pipewire.h>
+#include <pipewire/filter.h>
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+#define DEFAULT_RATE 44100
+#define DEFAULT_FREQ 440
+#define DEFAULT_VOLUME 0.7
+
struct data;

/* Per-port user data, allocated for us by pw_filter_add_port(). */
struct port {
	struct data *data;	/* back pointer to the global state */
	double accumulator;	/* current phase of the sine oscillator, [0, 2*pi) */
};

/* Global state of the DSP source example. */
struct data {
	struct pw_main_loop *loop;	/* event loop driving the example */
	struct pw_filter *filter;	/* the filter node */
	struct port *out_port;		/* DSP output port producing the tone */
};
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * out = pw_filter_dequeue_buffer(filter, out_port);
+ *
+ * .. generate data in the buffer ...
+ *
+ * pw_filter_queue_buffer(filter, out_port, out);
+ *
+ * For DSP ports, there is a shortcut to directly dequeue, get
+ * the data and requeue the buffer with pw_filter_get_dsp_buffer().
+ */
+static void on_process(void *userdata, struct spa_io_position *position)
+{
+ struct data *data = userdata;
+ float *out;
+ struct port *out_port = data->out_port;
+ uint32_t i, n_samples = position->clock.duration;
+
+ pw_log_trace("do process %d", n_samples);
+
+ out = pw_filter_get_dsp_buffer(out_port, n_samples);
+ if (out == NULL)
+ return;
+
+ for (i = 0; i < n_samples; i++) {
+ out_port->accumulator += M_PI_M2 * DEFAULT_FREQ / DEFAULT_RATE;
+ if (out_port->accumulator >= M_PI_M2)
+ out_port->accumulator -= M_PI_M2;
+
+ *out++ = sin(out_port->accumulator) * DEFAULT_VOLUME;
+ }
+}
+
/* Only the process event is of interest for this example. */
static const struct pw_filter_events filter_events = {
	PW_VERSION_FILTER_EVENTS,
	.process = on_process,
};

/* SIGINT/SIGTERM handler: quit the main loop so main() can clean up. */
static void do_quit(void *userdata, int signal_number)
{
	struct data *data = userdata;
	pw_main_loop_quit(data->loop);
}
+
/* Entry point: create the source filter with one output DSP port,
 * connect it and run until SIGINT/SIGTERM. */
int main(int argc, char *argv[])
{
	struct data data = { 0, };

	pw_init(&argc, &argv);

	/* make a main loop. If you already have another main loop, you can add
	 * the fd of this pipewire mainloop to it. */
	data.loop = pw_main_loop_new(NULL);

	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* Create a simple filter, the simple filter manages the core and remote
	 * objects for you if you don't need to deal with them.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the filter state. The most important event
	 * you need to listen to is the process event where you need to process
	 * the data.
	 */
	data.filter = pw_filter_new_simple(
			pw_main_loop_get_loop(data.loop),
			"audio-dsp-src",
			pw_properties_new(
				PW_KEY_MEDIA_TYPE, "Audio",
				PW_KEY_MEDIA_CATEGORY, "Source",
				PW_KEY_MEDIA_ROLE, "DSP",
				PW_KEY_MEDIA_CLASS, "Stream/Output/Audio",
				PW_KEY_NODE_AUTOCONNECT, "true",
				NULL),
			&filter_events,
			&data);

	/* make an audio DSP output port */
	data.out_port = pw_filter_add_port(data.filter,
			PW_DIRECTION_OUTPUT,
			PW_FILTER_PORT_FLAG_MAP_BUFFERS,
			sizeof(struct port),
			pw_properties_new(
				PW_KEY_FORMAT_DSP, "32 bit float mono audio",
				PW_KEY_PORT_NAME, "output",
				NULL),
			NULL, 0);

	/* Now connect this filter. We ask that our process function is
	 * called in a realtime thread. */
	if (pw_filter_connect(data.filter,
				PW_FILTER_FLAG_RT_PROCESS,
				NULL, 0) < 0) {
		fprintf(stderr, "can't connect\n");
		return -1;
	}

	/* and wait while we let things run */
	pw_main_loop_run(data.loop);

	pw_filter_destroy(data.filter);
	pw_main_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/audio-src.c b/src/examples/audio-src.c
new file mode 100644
index 0000000..1217732
--- /dev/null
+++ b/src/examples/audio-src.c
@@ -0,0 +1,186 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Audio source using \ref pw_stream "pw_stream".
+ [title]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <math.h>
+#include <signal.h>
+
+#include <spa/param/audio/format-utils.h>
+
+#include <pipewire/pipewire.h>
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+#define DEFAULT_RATE 44100
+#define DEFAULT_CHANNELS 2
+#define DEFAULT_VOLUME 0.7
+
/* Global state of the playback example. */
struct data {
	struct pw_main_loop *loop;	/* event loop driving the example */
	struct pw_stream *stream;	/* the playback stream */

	double accumulator;		/* current phase of the sine oscillator, [0, 2*pi) */
};
+
+static void fill_f32(struct data *d, void *dest, int n_frames)
+{
+ float *dst = dest, val;
+ int i, c;
+
+ for (i = 0; i < n_frames; i++) {
+ d->accumulator += M_PI_M2 * 440 / DEFAULT_RATE;
+ if (d->accumulator >= M_PI_M2)
+ d->accumulator -= M_PI_M2;
+
+ val = sin(d->accumulator) * DEFAULT_VOLUME;
+ for (c = 0; c < DEFAULT_CHANNELS; c++)
+ *dst++ = val;
+ }
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. generate stuff in the buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void on_process(void *userdata)
+{
+ struct data *data = userdata;
+ struct pw_buffer *b;
+ struct spa_buffer *buf;
+ int n_frames, stride;
+ uint8_t *p;
+
+ if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
+ pw_log_warn("out of buffers: %m");
+ return;
+ }
+
+ buf = b->buffer;
+ if ((p = buf->datas[0].data) == NULL)
+ return;
+
+ stride = sizeof(float) * DEFAULT_CHANNELS;
+ n_frames = SPA_MIN(b->requested, buf->datas[0].maxsize / stride);
+
+ fill_f32(data, p, n_frames);
+
+ buf->datas[0].chunk->offset = 0;
+ buf->datas[0].chunk->stride = stride;
+ buf->datas[0].chunk->size = n_frames * stride;
+
+ pw_stream_queue_buffer(data->stream, b);
+}
+
/* Only the process event is of interest for this example. */
static const struct pw_stream_events stream_events = {
	PW_VERSION_STREAM_EVENTS,
	.process = on_process,
};

/* SIGINT/SIGTERM handler: quit the main loop so main() can clean up. */
static void do_quit(void *userdata, int signal_number)
{
	struct data *data = userdata;
	pw_main_loop_quit(data->loop);
}
+
/* Entry point: set up the main loop and the playback stream, run until
 * SIGINT/SIGTERM, then tear everything down.
 *
 * Usage: audio-src [target-object]
 */
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[1];
	uint8_t buffer[1024];
	struct pw_properties *props;
	/* pod builder for the format param, backed by stack memory */
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	pw_init(&argc, &argv);

	/* make a main loop. If you already have another main loop, you can add
	 * the fd of this pipewire mainloop to it. */
	data.loop = pw_main_loop_new(NULL);

	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* Create a simple stream, the simple stream manages the core and remote
	 * objects for you if you don't need to deal with them.
	 *
	 * If you plan to autoconnect your stream, you need to provide at least
	 * media, category and role properties.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the stream state. The most important event
	 * you need to listen to is the process event where you need to produce
	 * the data.
	 */
	props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Audio",
			PW_KEY_MEDIA_CATEGORY, "Playback",
			PW_KEY_MEDIA_ROLE, "Music",
			NULL);
	if (argc > 1)
		/* Set stream target if given on command line */
		pw_properties_set(props, PW_KEY_TARGET_OBJECT, argv[1]);
	/* the stream takes ownership of props */
	data.stream = pw_stream_new_simple(
			pw_main_loop_get_loop(data.loop),
			"audio-src",
			props,
			&stream_events,
			&data);

	/* Make one parameter with the supported formats. The SPA_PARAM_EnumFormat
	 * id means that this is a format enumeration (of 1 value). */
	params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat,
			&SPA_AUDIO_INFO_RAW_INIT(
				.format = SPA_AUDIO_FORMAT_F32,
				.channels = DEFAULT_CHANNELS,
				.rate = DEFAULT_RATE ));

	/* Now connect this stream. We ask that our process function is
	 * called in a realtime thread. */
	pw_stream_connect(data.stream,
			  PW_DIRECTION_OUTPUT,
			  PW_ID_ANY,
			  PW_STREAM_FLAG_AUTOCONNECT |
			  PW_STREAM_FLAG_MAP_BUFFERS |
			  PW_STREAM_FLAG_RT_PROCESS,
			  params, 1);

	/* and wait while we let things run */
	pw_main_loop_run(data.loop);

	pw_stream_destroy(data.stream);
	pw_main_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/bluez-session.c b/src/examples/bluez-session.c
new file mode 100644
index 0000000..7d7e94c
--- /dev/null
+++ b/src/examples/bluez-session.c
@@ -0,0 +1,400 @@
+/* PipeWire
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Using the \ref spa_device "SPA Device API", among other things.
+ [title]
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <math.h>
+#include <time.h>
+
+#include "config.h"
+
+#include <spa/monitor/device.h>
+#include <spa/node/node.h>
+#include <spa/utils/hook.h>
+#include <spa/utils/names.h>
+#include <spa/utils/result.h>
+#include <spa/utils/string.h>
+#include <spa/param/audio/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/dict.h>
+
+#include "pipewire/pipewire.h"
+
+#define NAME "bluez-session"
+
struct impl;
struct object;

/* An SPA node announced by a device and exported to the daemon. */
struct node {
	struct impl *impl;		/* back pointer to the global state */
	struct object *object;		/* owning device object */
	struct spa_list link;		/* link in object->node_list */
	uint32_t id;			/* device-local object id */

	struct spa_handle *handle;	/* loaded SPA plugin handle */
	struct pw_proxy *proxy;		/* proxy of the exported node */
	struct spa_node *node;		/* node interface of @handle */
};

/* A bluetooth device announced by the monitor and exported to the daemon. */
struct object {
	struct impl *impl;		/* back pointer to the global state */
	struct spa_list link;		/* link in impl->device_list */
	uint32_t id;			/* monitor-local object id */

	struct spa_handle *handle;	/* loaded SPA plugin handle */
	struct pw_proxy *proxy;		/* proxy of the exported device */
	struct spa_device *device;	/* device interface of @handle */
	struct spa_hook listener;	/* listener for this device's events */

	struct spa_list node_list;	/* nodes announced by this device */
};

/* Global state of this minimal bluez session manager. */
struct impl {
	struct timespec now;		/* startup timestamp */

	struct pw_main_loop *loop;	/* event loop */
	struct pw_context *context;	/* local pipewire context */

	struct pw_core *core;		/* connection to the pipewire daemon */
	struct spa_hook core_listener;	/* listener for core errors */

	struct spa_handle *handle;	/* the bluez5 dbus monitor plugin */
	struct spa_device *device;	/* device interface of the monitor */
	struct spa_hook listener;	/* listener for monitor object_info */

	struct spa_list device_list;	/* discovered devices (struct object) */
};
+
+static struct node *find_node(struct object *obj, uint32_t id)
+{
+ struct node *node;
+
+ spa_list_for_each(node, &obj->node_list, link) {
+ if (node->id == id)
+ return node;
+ }
+ return NULL;
+}
+
/* A node we already exported changed; log its new properties.
 * NOTE(review): the exported node itself is not updated here — confirm
 * whether that is intentional for this example. */
static void update_node(struct object *obj, struct node *node,
		const struct spa_device_object_info *info)
{
	pw_log_debug("update node %u", node->id);
	spa_debug_dict(0, info->props);
}
+
/* Create and export a node announced by device @obj.
 *
 * The node's SPA factory (info->factory_name) is instantiated locally
 * and the resulting spa_node interface is handed to the daemon with
 * pw_core_export(). Returns the new node, or NULL on failure (all
 * partially acquired resources are released via the goto chain).
 */
static struct node *create_node(struct object *obj, uint32_t id,
		const struct spa_device_object_info *info)
{
	struct node *node;
	struct impl *impl = obj->impl;
	struct pw_context *context = impl->context;
	struct spa_handle *handle;
	int res;
	void *iface;

	pw_log_debug("new node %u", id);

	/* we only know how to export Node interfaces */
	if (!spa_streq(info->type, SPA_TYPE_INTERFACE_Node))
		return NULL;

	/* instantiate the SPA plugin implementing the node */
	handle = pw_context_load_spa_handle(context,
			info->factory_name,
			info->props);
	if (handle == NULL) {
		pw_log_error("can't make factory instance: %m");
		goto exit;
	}

	if ((res = spa_handle_get_interface(handle, info->type, &iface)) < 0) {
		pw_log_error("can't get %s interface: %s", info->type, spa_strerror(res));
		goto unload_handle;
	}

	node = calloc(1, sizeof(*node));
	if (node == NULL)
		goto unload_handle;

	node->impl = impl;
	node->object = obj;
	node->id = id;
	node->handle = handle;
	node->node = iface;
	/* export the node to the daemon; the proxy represents it remotely */
	node->proxy = pw_core_export(impl->core,
			info->type, info->props, node->node, 0);
	if (node->proxy == NULL)
		goto clean_node;

	spa_list_append(&obj->node_list, &node->link);

	update_node(obj, node, info);

	return node;

clean_node:
	free(node);
unload_handle:
	pw_unload_spa_handle(handle);
exit:
	return NULL;
}
+
+static void remove_node(struct object *obj, struct node *node)
+{
+ pw_log_debug("remove node %u", node->id);
+ spa_list_remove(&node->link);
+ pw_proxy_destroy(node->proxy);
+ free(node->handle);
+ free(node);
+}
+
+static void device_object_info(void *data, uint32_t id,
+ const struct spa_device_object_info *info)
+{
+ struct object *obj = data;
+ struct node *node;
+
+ node = find_node(obj, id);
+
+ if (info == NULL) {
+ if (node == NULL) {
+ pw_log_warn("object %p: unknown node %u", obj, id);
+ return;
+ }
+ remove_node(obj, node);
+ } else if (node == NULL) {
+ create_node(obj, id, info);
+ } else {
+ update_node(obj, node, info);
+ }
+
+}
+
/* Events from an exported device: it announces/removes nodes via object_info. */
static const struct spa_device_events device_events = {
	SPA_VERSION_DEVICE_EVENTS,
	.object_info = device_object_info
};
+
+static struct object *find_object(struct impl *impl, uint32_t id)
+{
+ struct object *obj;
+
+ spa_list_for_each(obj, &impl->device_list, link) {
+ if (obj->id == id)
+ return obj;
+ }
+ return NULL;
+}
+
/* A device we already exported changed; log its new properties.
 * NOTE(review): the exported device itself is not updated here — confirm
 * whether that is intentional for this example. */
static void update_object(struct impl *impl, struct object *obj,
		const struct spa_device_object_info *info)
{
	pw_log_debug("update object %u", obj->id);
	spa_debug_dict(0, info->props);
}
+
/* Create and export a device announced by the monitor.
 *
 * The device's SPA factory (info->factory_name) is instantiated locally
 * and the resulting spa_device interface is handed to the daemon with
 * pw_core_export(). A listener is installed so the device can announce
 * its own nodes. Returns the new object, or NULL on failure (all
 * partially acquired resources are released via the goto chain).
 */
static struct object *create_object(struct impl *impl, uint32_t id,
		const struct spa_device_object_info *info)
{
	struct pw_context *context = impl->context;
	struct object *obj;
	struct spa_handle *handle;
	int res;
	void *iface;

	pw_log_debug("new object %u", id);

	/* we only know how to export Device interfaces */
	if (!spa_streq(info->type, SPA_TYPE_INTERFACE_Device))
		return NULL;

	/* instantiate the SPA plugin implementing the device */
	handle = pw_context_load_spa_handle(context,
			info->factory_name,
			info->props);
	if (handle == NULL) {
		pw_log_error("can't make factory instance: %m");
		goto exit;
	}

	if ((res = spa_handle_get_interface(handle, info->type, &iface)) < 0) {
		pw_log_error("can't get %s interface: %s", info->type, spa_strerror(res));
		goto unload_handle;
	}

	obj = calloc(1, sizeof(*obj));
	if (obj == NULL)
		goto unload_handle;

	obj->impl = impl;
	obj->id = id;
	obj->handle = handle;
	obj->device = iface;
	/* export the device to the daemon; the proxy represents it remotely */
	obj->proxy = pw_core_export(impl->core,
			info->type, info->props, obj->device, 0);
	if (obj->proxy == NULL)
		goto clean_object;

	spa_list_init(&obj->node_list);

	/* listen for the nodes this device will announce */
	spa_device_add_listener(obj->device,
			&obj->listener, &device_events, obj);

	spa_list_append(&impl->device_list, &obj->link);

	update_object(impl, obj, info);

	return obj;

clean_object:
	free(obj);
unload_handle:
	pw_unload_spa_handle(handle);
exit:
	return NULL;
}
+
+static void remove_object(struct impl *impl, struct object *obj)
+{
+ pw_log_debug("remove object %u", obj->id);
+ spa_list_remove(&obj->link);
+ spa_hook_remove(&obj->listener);
+ pw_proxy_destroy(obj->proxy);
+ free(obj->handle);
+ free(obj);
+}
+
+static void dbus_device_object_info(void *data, uint32_t id,
+ const struct spa_device_object_info *info)
+{
+ struct impl *impl = data;
+ struct object *obj;
+
+ obj = find_object(impl, id);
+
+ if (info == NULL) {
+ if (obj == NULL)
+ return;
+ remove_object(impl, obj);
+ } else if (obj == NULL) {
+ if (create_object(impl, id, info) == NULL)
+ return;
+ } else {
+ update_object(impl, obj, info);
+ }
+}
+
/* Events from the bluez5 dbus monitor: devices come and go. */
static const struct spa_device_events dbus_device_events =
{
	SPA_VERSION_DEVICE_EVENTS,
	.object_info = dbus_device_object_info,
};

/* Load the bluez5 dbus enumeration plugin and start listening for its
 * device object_info events. Returns 0 or a negative errno-style error. */
static int start_monitor(struct impl *impl)
{
	struct spa_handle *handle;
	int res;
	void *iface;

	handle = pw_context_load_spa_handle(impl->context, SPA_NAME_API_BLUEZ5_ENUM_DBUS, NULL);
	if (handle == NULL) {
		res = -errno;
		goto out;
	}

	if ((res = spa_handle_get_interface(handle, SPA_TYPE_INTERFACE_Device, &iface)) < 0) {
		pw_log_error("can't get MONITOR interface: %d", res);
		goto out_unload;
	}

	/* impl keeps ownership of the monitor handle for the process lifetime */
	impl->handle = handle;
	impl->device = iface;

	spa_device_add_listener(impl->device, &impl->listener, &dbus_device_events, impl);

	return 0;

 out_unload:
	pw_unload_spa_handle(handle);
 out:
	return res;
}
+
/* Core error event. An error on PW_ID_CORE with -EPIPE means the
 * connection to the daemon is gone, in which case we stop the loop. */
static void on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
	struct impl *impl = data;

	pw_log_error("error id:%u seq:%d res:%d (%s): %s",
			id, seq, res, spa_strerror(res), message);

	if (id == PW_ID_CORE && res == -EPIPE)
		pw_main_loop_quit(impl->loop);
}

static const struct pw_core_events core_events = {
	PW_VERSION_CORE_EVENTS,
	.error = on_core_error,
};
+
/* Entry point: connect to the pipewire daemon, start the bluez monitor
 * and run until an error or signal stops the loop. */
int main(int argc, char *argv[])
{
	struct impl impl = { 0, };
	int res;

	pw_init(&argc, &argv);

	impl.loop = pw_main_loop_new(NULL);
	impl.context = pw_context_new(pw_main_loop_get_loop(impl.loop), NULL, 0);

	clock_gettime(CLOCK_MONOTONIC, &impl.now);

	spa_list_init(&impl.device_list);

	impl.core = pw_context_connect(impl.context, NULL, 0);
	if (impl.core == NULL) {
		/* NOTE(review): the error paths below do not destroy the
		 * context/loop; acceptable for an example since the process
		 * exits immediately — confirm before reusing this code */
		pw_log_error(NAME" %p: can't connect %m", &impl);
		return -1;
	}

	pw_core_add_listener(impl.core,
			&impl.core_listener,
			&core_events, &impl);

	if ((res = start_monitor(&impl)) < 0) {
		pw_log_error(NAME" %p: error starting monitor: %s", &impl, spa_strerror(res));
		return -1;
	}

	/* runs until on_core_error() quits the loop */
	pw_main_loop_run(impl.loop);

	pw_context_destroy(impl.context);
	pw_main_loop_destroy(impl.loop);

	return 0;
}
diff --git a/src/examples/export-sink.c b/src/examples/export-sink.c
new file mode 100644
index 0000000..ee8a57c
--- /dev/null
+++ b/src/examples/export-sink.c
@@ -0,0 +1,584 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Exporting and implementing a video sink SPA node, using \ref api_pw_core.
+ [title]
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <sys/mman.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/node/utils.h>
+#include <spa/node/io.h>
+#include <spa/pod/filter.h>
+#include <spa/debug/format.h>
+#include <spa/debug/pod.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+#define BPP 3
+
+#include "sdl.h"
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+#define MAX_BUFFERS 64
+
+#define DEFAULT_PARAM 0.1
+
/* User-controllable properties of the sink node. */
struct props {
	double param;	/* demo control value, driven by update_param() */
};

/* Restore all properties to their defaults. */
static void reset_props(struct props *props)
{
	props->param = DEFAULT_PARAM;
}
+
/* All state of the exported video-sink example node. */
struct data {
	struct props props;

	const char *path;		/* optional target object from argv[1] */

	/* SDL output objects used to display incoming frames. */
	SDL_Renderer *renderer;
	SDL_Window *window;
	SDL_Texture *texture;

	struct pw_main_loop *loop;

	struct pw_context *context;

	struct pw_core *core;
	struct spa_hook core_listener;

	/* The locally implemented SPA node that is exported to the daemon. */
	struct spa_node impl_node;
	struct spa_hook_list hooks;	/* listeners attached via add_listener */
	struct spa_io_buffers *io;	/* buffer exchange area, set by port_set_io */
	struct spa_io_sequence *io_notify;	/* control/notify area, may be NULL */
	uint32_t io_notify_size;
	double param_accum;		/* phase accumulator for update_param() */

	uint8_t buffer[1024];

	/* Currently negotiated video format and the stride SDL reported. */
	struct spa_video_info_raw format;
	int32_t stride;

	struct spa_port_info info;
	struct spa_param_info params[5];

	struct spa_region region;	/* last seen damage region, for logging */

	struct spa_buffer *buffers[MAX_BUFFERS];	/* borrowed, not owned */
	uint32_t n_buffers;
};
+
+static void handle_events(struct data *data)
+{
+ SDL_Event event;
+ while (SDL_PollEvent(&event)) {
+ switch (event.type) {
+ case SDL_QUIT:
+ pw_main_loop_quit(data->loop);
+ break;
+ }
+ }
+}
+
/* Write a Props control (a slowly oscillating contrast value) into the
 * io_notify area as a SPA sequence. A no-op until the peer has attached
 * a notify IO area via port_set_io. */
static void update_param(struct data *data)
{
	struct spa_pod_builder b = { 0, };
	struct spa_pod_frame f[2];

	if (data->io_notify == NULL)
		return;

	/* Build: Sequence { Control(Properties) { Props { contrast: ... } } }
	 * directly into the shared notify memory. */
	spa_pod_builder_init(&b, data->io_notify, data->io_notify_size);
	spa_pod_builder_push_sequence(&b, &f[0], 0);
	spa_pod_builder_control(&b, 0, SPA_CONTROL_Properties);
	spa_pod_builder_push_object(&b, &f[1], SPA_TYPE_OBJECT_Props, 0);
	spa_pod_builder_prop(&b, SPA_PROP_contrast, 0);
	spa_pod_builder_float(&b, (sin(data->param_accum) * 127.0) + 127.0);
	spa_pod_builder_pop(&b, &f[1]);
	spa_pod_builder_pop(&b, &f[0]);

	/* Advance the phase; one full sweep every 30 calls. */
	data->param_accum += M_PI_M2 / 30.0;
	if (data->param_accum >= M_PI_M2)
		data->param_accum -= M_PI_M2;
}
+
/* Commands (Start/Pause/...) need no special handling in this example. */
static int impl_send_command(void *object, const struct spa_command *command)
{
	return 0;
}

/* Register a node-events listener. Per SPA convention, the current port
 * info is replayed to the new listener only (hence isolate/join around
 * the emit), with the full change mask temporarily forced. */
static int impl_add_listener(void *object,
		struct spa_hook *listener,
		const struct spa_node_events *events,
		void *data)
{
	struct data *d = object;
	struct spa_hook_list save;
	uint64_t old;

	spa_hook_list_isolate(&d->hooks, &save, listener, events, data);

	old = d->info.change_mask;
	d->info.change_mask = SPA_PORT_CHANGE_MASK_FLAGS |
		SPA_PORT_CHANGE_MASK_PARAMS;
	spa_node_emit_port_info(&d->hooks, SPA_DIRECTION_INPUT, 0, &d->info);
	d->info.change_mask = old;

	spa_hook_list_join(&d->hooks, &save);

	return 0;
}

/* No node callbacks are used. */
static int impl_set_callbacks(void *object,
		const struct spa_node_callbacks *callbacks, void *data)
{
	return 0;
}

/* No node-level IO areas are used. */
static int impl_set_io(void *object,
		uint32_t id, void *data, size_t size)
{
	return 0;
}

/* Attach/detach the shared IO areas for the single input port:
 * SPA_IO_Buffers for buffer exchange, SPA_IO_Notify for control output. */
static int impl_port_set_io(void *object,
		enum spa_direction direction, uint32_t port_id,
		uint32_t id, void *data, size_t size)
{
	struct data *d = object;

	switch (id) {
	case SPA_IO_Buffers:
		d->io = data;
		break;
	case SPA_IO_Notify:
		d->io_notify = data;
		d->io_notify_size = size;
		break;
	default:
		return -ENOENT;
	}
	return 0;
}
+
/* Enumerate the parameters of the input port. Results are emitted
 * asynchronously via the node result event; each candidate pod is built
 * in a scratch buffer, filtered against the caller's filter, and skipped
 * (goto next) when the filter rejects it. */
static int impl_port_enum_params(void *object, int seq,
		enum spa_direction direction, uint32_t port_id,
		uint32_t id, uint32_t start, uint32_t num,
		const struct spa_pod *filter)
{
	struct data *d = object;
	struct spa_pod *param;
	struct spa_pod_builder b = { 0 };
	uint8_t buffer[1024];
	struct spa_result_node_params result;
	uint32_t count = 0;

	result.id = id;
	result.next = start;
      next:
	result.index = result.next++;

	/* Reset the builder for every candidate param. */
	spa_pod_builder_init(&b, buffer, sizeof(buffer));

	switch (id) {
	case SPA_PARAM_EnumFormat:
	{
		SDL_RendererInfo info;

		if (result.index != 0)
			return 0;

		/* Offer whatever pixel formats the SDL renderer supports. */
		SDL_GetRendererInfo(d->renderer, &info);
		param = sdl_build_formats(&info, &b);
		break;
	}
	case SPA_PARAM_Format:
		/* Only available once a format has been negotiated. */
		if (result.index != 0 || d->format.format == 0)
			return 0;
		param = spa_format_video_raw_build(&b, id, &d->format);
		break;

	case SPA_PARAM_Buffers:
		if (result.index != 0)
			return 0;

		/* Buffer size follows from the negotiated size and the
		 * stride SDL reported for the texture. */
		param = spa_pod_builder_add_object(&b,
			SPA_TYPE_OBJECT_ParamBuffers, id,
			SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(2, 2, MAX_BUFFERS),
			SPA_PARAM_BUFFERS_blocks,  SPA_POD_Int(1),
			SPA_PARAM_BUFFERS_size,    SPA_POD_Int(d->stride * d->format.size.height),
			SPA_PARAM_BUFFERS_stride,  SPA_POD_Int(d->stride));
		break;

	case SPA_PARAM_Meta:
		/* Supported per-buffer metadata: header and video damage. */
		switch (result.index) {
		case 0:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamMeta, id,
				SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
				SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
			break;
		case 1:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamMeta, id,
				SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
				SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
			break;
		default:
			return 0;
		}
		break;

	case SPA_PARAM_IO:
		/* IO areas this port accepts in port_set_io(). */
		switch (result.index) {
		case 0:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamIO, id,
				SPA_PARAM_IO_id,   SPA_POD_Id(SPA_IO_Buffers),
				SPA_PARAM_IO_size, SPA_POD_Int(sizeof(struct spa_io_buffers)));
			break;
		case 1:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamIO, id,
				SPA_PARAM_IO_id,   SPA_POD_Id(SPA_IO_Notify),
				SPA_PARAM_IO_size, SPA_POD_Int(sizeof(struct spa_io_sequence) + 1024));
			break;
		default:
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	/* Filtered out: try the next index. */
	if (spa_pod_filter(&b, &result.param, param, filter) < 0)
		goto next;

	spa_node_emit_result(&d->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result);

	if (++count != num)
		goto next;

	return 0;
}
+
+static int port_set_format(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t flags, const struct spa_pod *format)
+{
+ struct data *d = object;
+ Uint32 sdl_format;
+ void *dest;
+
+ if (format == NULL) {
+ spa_zero(d->format);
+ SDL_DestroyTexture(d->texture);
+ d->texture = NULL;
+ } else {
+ spa_debug_format(0, NULL, format);
+
+ spa_format_video_raw_parse(format, &d->format);
+
+ sdl_format = id_to_sdl_format(d->format.format);
+ if (sdl_format == SDL_PIXELFORMAT_UNKNOWN)
+ return -EINVAL;
+ if (d->format.size.width == 0 ||
+ d->format.size.height == 0)
+ return -EINVAL;
+
+ d->texture = SDL_CreateTexture(d->renderer,
+ sdl_format,
+ SDL_TEXTUREACCESS_STREAMING,
+ d->format.size.width,
+ d->format.size.height);
+ SDL_LockTexture(d->texture, NULL, &dest, &d->stride);
+ SDL_UnlockTexture(d->texture);
+
+ }
+ d->info.change_mask = SPA_PORT_CHANGE_MASK_PARAMS;
+ if (format) {
+ d->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_READWRITE);
+ d->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ);
+ } else {
+ d->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
+ d->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0);
+ }
+
+ spa_node_emit_port_info(&d->hooks, direction, port_id, &d->info);
+ d->info.change_mask = 0;
+
+ return 0;
+}
+
+static int impl_port_set_param(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t id, uint32_t flags,
+ const struct spa_pod *param)
+{
+ if (id == SPA_PARAM_Format) {
+ return port_set_format(object, direction, port_id, flags, param);
+ }
+ else
+ return -ENOENT;
+}
+
+static int impl_port_use_buffers(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t flags,
+ struct spa_buffer **buffers, uint32_t n_buffers)
+{
+ struct data *d = object;
+ uint32_t i;
+
+ if (n_buffers > MAX_BUFFERS)
+ return -ENOSPC;
+
+ for (i = 0; i < n_buffers; i++)
+ d->buffers[i] = buffers[i];
+ d->n_buffers = n_buffers;
+ return 0;
+}
+
+static int do_render(struct spa_loop *loop, bool async, uint32_t seq,
+ const void *_data, size_t size, void *user_data)
+{
+ struct data *d = user_data;
+ const struct spa_buffer *buf = *(struct spa_buffer**)_data;
+ uint8_t *map;
+ void *sdata, *ddata;
+ int sstride, dstride, ostride;
+ uint32_t i;
+ uint8_t *src, *dst;
+ struct spa_meta *m;
+ struct spa_meta_region *r;
+
+ handle_events(d);
+
+ if (buf->datas[0].type == SPA_DATA_MemFd ||
+ buf->datas[0].type == SPA_DATA_DmaBuf) {
+ map = mmap(NULL, buf->datas[0].maxsize + buf->datas[0].mapoffset, PROT_READ,
+ MAP_PRIVATE, buf->datas[0].fd, 0);
+ sdata = SPA_PTROFF(map, buf->datas[0].mapoffset, uint8_t);
+ } else if (buf->datas[0].type == SPA_DATA_MemPtr) {
+ map = NULL;
+ sdata = buf->datas[0].data;
+ } else
+ return -EINVAL;
+
+ if (SDL_LockTexture(d->texture, NULL, &ddata, &dstride) < 0) {
+ fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+ return -EIO;
+ }
+
+ if ((m = spa_buffer_find_meta(buf, SPA_META_VideoDamage))) {
+ spa_meta_for_each(r, m) {
+ if (!spa_meta_region_is_valid(r))
+ break;
+ if (memcmp(&r->region, &d->region, sizeof(struct spa_region)) == 0)
+ break;
+ d->region = r->region;
+ fprintf(stderr, "region %dx%d->%dx%d\n",
+ r->region.position.x, r->region.position.y,
+ r->region.size.width, r->region.size.height);
+ }
+ }
+
+ sstride = buf->datas[0].chunk->stride;
+ ostride = SPA_MIN(sstride, dstride);
+
+ src = sdata;
+ dst = ddata;
+ for (i = 0; i < d->format.size.height; i++) {
+ memcpy(dst, src, ostride);
+ src += sstride;
+ dst += dstride;
+ }
+ SDL_UnlockTexture(d->texture);
+
+ SDL_RenderClear(d->renderer);
+ SDL_RenderCopy(d->renderer, d->texture, NULL, NULL);
+ SDL_RenderPresent(d->renderer);
+
+ if (map)
+ munmap(map, buf->datas[0].maxsize + buf->datas[0].mapoffset);
+
+ return 0;
+}
+
/* Data-path entry point: consume the buffer offered in the io area,
 * hand it to the main loop for rendering (synchronously, block=false is
 * the 'seq' convention of pw_loop_invoke here), then ask for more data. */
static int impl_node_process(void *object)
{
	struct data *d = object;
	struct spa_buffer *buf;
	int res;

	if (d->io->status != SPA_STATUS_HAVE_DATA)
		return SPA_STATUS_NEED_DATA;

	/* Ignore out-of-range buffer ids. */
	if (d->io->buffer_id >= d->n_buffers)
		return SPA_STATUS_NEED_DATA;

	buf = d->buffers[d->io->buffer_id];

	if ((res = pw_loop_invoke(pw_main_loop_get_loop(d->loop), do_render,
					SPA_ID_INVALID, &buf, sizeof(struct spa_buffer *),
					false, d)) < 0)
		return res;

	update_param(d);

	/* Mark the buffer consumed and request the next one. */
	return d->io->status = SPA_STATUS_NEED_DATA;
}
+
/* Method table of the exported sink node. */
static const struct spa_node_methods impl_node = {
	SPA_VERSION_NODE_METHODS,
	.add_listener = impl_add_listener,
	.set_callbacks = impl_set_callbacks,
	.set_io = impl_set_io,
	.send_command = impl_send_command,
	.port_set_io = impl_port_set_io,
	.port_enum_params = impl_port_enum_params,
	.port_set_param = impl_port_set_param,
	.port_use_buffers = impl_port_use_buffers,
	.process = impl_node_process,
};
+
/* Wrap the local spa_node in an interface and export it to the daemon
 * as an auto-connecting video capture stream. */
static void make_node(struct data *data)
{
	struct pw_properties *props;

	props = pw_properties_new(PW_KEY_NODE_AUTOCONNECT, "true", NULL);
	if (data->path)
		/* connect to the object named on the command line */
		pw_properties_set(props, PW_KEY_TARGET_OBJECT, data->path);
	pw_properties_set(props, PW_KEY_MEDIA_CLASS, "Stream/Input/Video");
	pw_properties_set(props, PW_KEY_MEDIA_TYPE, "Video");
	pw_properties_set(props, PW_KEY_MEDIA_CATEGORY, "Capture");
	pw_properties_set(props, PW_KEY_MEDIA_ROLE, "Camera");

	data->impl_node.iface = SPA_INTERFACE_INIT(
			SPA_TYPE_INTERFACE_Node,
			SPA_VERSION_NODE,
			&impl_node, data);
	pw_core_export(data->core, SPA_TYPE_INTERFACE_Node,
			&props->dict, &data->impl_node, 0);
	pw_properties_free(props);
}
+
/* Demonstrate the client permission API: keep read+execute on the core
 * object and drop write/execute on everything else. */
static void set_permissions(struct data *data)
{
	struct pw_permission permissions[2];

	/* an example, set specific permissions on one object, this is the
	 * core object. */
	permissions[0] = PW_PERMISSION_INIT(PW_ID_CORE, PW_PERM_R | PW_PERM_X);
	/* remove WX from all other objects */
	permissions[1] = PW_PERMISSION_INIT(PW_ID_ANY, PW_PERM_R);

	pw_client_update_permissions(
			pw_core_get_client(data->core),
			2, permissions);
}
+
/* Log core errors; any error on the core object ends the example. */
static void on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
	struct data *d = data;

	pw_log_error("error id:%u seq:%d res:%d (%s): %s",
			id, seq, res, spa_strerror(res), message);

	if (id == PW_ID_CORE)
		pw_main_loop_quit(d->loop);
}

/* Only the error event is handled. */
static const struct pw_core_events core_events = {
	PW_VERSION_CORE_EVENTS,
	.error = on_core_error,
};
+
/* Entry point: set up SDL and PipeWire, declare the port's static info
 * and supported params, export the node, and run until quit.
 * NOTE(review): error paths exit without tearing down SDL/context —
 * acceptable for an example, the OS reclaims everything. */
int main(int argc, char *argv[])
{
	struct data data = { 0, };

	pw_init(&argc, &argv);

	data.loop = pw_main_loop_new(NULL);
	data.context = pw_context_new(pw_main_loop_get_loop(data.loop), NULL, 0);
	data.path = argc > 1 ? argv[1] : NULL;

	spa_hook_list_init(&data.hooks);

	/* Static port description: params 3/4 (Format/Buffers) are later
	 * flipped by port_set_format() when a format is (un)set. */
	data.info = SPA_PORT_INFO_INIT();
	data.info.change_mask = SPA_PORT_CHANGE_MASK_FLAGS;
	data.info.flags = 0;
	data.info.change_mask |= SPA_PORT_CHANGE_MASK_PARAMS;
	data.params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ);
	data.params[1] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ);
	data.params[2] = SPA_PARAM_INFO(SPA_PARAM_IO, SPA_PARAM_INFO_READ);
	data.params[3] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ);
	data.params[4] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
	data.info.params = data.params;
	data.info.n_params = 5;

	reset_props(&data.props);

	if (SDL_Init(SDL_INIT_VIDEO) < 0) {
		printf("can't initialize SDL: %s\n", SDL_GetError());
		return -1;
	}

	if (SDL_CreateWindowAndRenderer
	    (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
		printf("can't create window: %s\n", SDL_GetError());
		return -1;
	}

	data.core = pw_context_connect(data.context, NULL, 0);
	if (data.core == NULL) {
		printf("can't connect: %m\n");
		return -1;
	}
	pw_core_add_listener(data.core, &data.core_listener, &core_events, &data);

	set_permissions(&data);

	make_node(&data);

	pw_main_loop_run(data.loop);

	pw_context_destroy(data.context);
	pw_main_loop_destroy(data.loop);

	return 0;
}
diff --git a/src/examples/export-source.c b/src/examples/export-source.c
new file mode 100644
index 0000000..eb8fdb5
--- /dev/null
+++ b/src/examples/export-source.c
@@ -0,0 +1,566 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Exporting and implementing a video source SPA node, using \ref api_pw_core.
+ [title]
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <math.h>
+#include <sys/mman.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/audio/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/node/io.h>
+#include <spa/node/utils.h>
+#include <spa/pod/filter.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+#define BUFFER_SAMPLES 128
+#define MAX_BUFFERS 32
+
/* Per-buffer bookkeeping for the exported source node. */
struct buffer {
	uint32_t id;			/* index into data->buffers */
	struct spa_buffer *buffer;	/* the peer-allocated buffer */
	struct spa_list link;		/* membership in data->empty */
	void *ptr;			/* CPU-accessible data pointer */
	bool mapped;			/* true when ptr came from mmap() */
};

/* All state of the exported audio-source example node. */
struct data {
	const char *path;		/* optional target object from argv[1] */

	struct pw_main_loop *loop;

	struct pw_context *context;

	struct pw_core *core;
	struct spa_hook core_listener;

	/* Static port description replayed in add_listener. */
	uint64_t info_all;
	struct spa_port_info info;
	struct spa_dict_item items[1];
	struct spa_dict dict;
	struct spa_param_info params[5];

	struct spa_node impl_node;	/* the node exported to the daemon */
	struct spa_hook_list hooks;
	struct spa_io_buffers *io;	/* buffer exchange area */
	struct spa_io_control *io_notify;	/* control output area */
	uint32_t io_notify_size;

	struct spa_audio_info_raw format;	/* negotiated audio format */

	struct buffer buffers[MAX_BUFFERS];
	uint32_t n_buffers;
	struct spa_list empty;		/* buffers available for filling */

	double accumulator;		/* sine phase for the test tone */
	double volume_accum;		/* phase for the oscillating volume */
};
+
/* Emit a Props control with a slowly oscillating volume into the notify
 * IO area. No-op until the peer attaches one via port_set_io. */
static void update_volume(struct data *data)
{
	struct spa_pod_builder b = { 0, };
	struct spa_pod_frame f[2];

	if (data->io_notify == NULL)
		return;

	/* Build: Sequence { Control(Properties) { Props { volume: ... } } } */
	spa_pod_builder_init(&b, data->io_notify, data->io_notify_size);
	spa_pod_builder_push_sequence(&b, &f[0], 0);
	spa_pod_builder_control(&b, 0, SPA_CONTROL_Properties);
	spa_pod_builder_push_object(&b, &f[1], SPA_TYPE_OBJECT_Props, 0);
	spa_pod_builder_prop(&b, SPA_PROP_volume, 0);
	spa_pod_builder_float(&b, (sin(data->volume_accum) / 2.0) + 0.5);
	spa_pod_builder_pop(&b, &f[1]);
	spa_pod_builder_pop(&b, &f[0]);

	data->volume_accum += M_PI_M2 / 1000.0;
	if (data->volume_accum >= M_PI_M2)
		data->volume_accum -= M_PI_M2;
}
+
/* Commands need no special handling in this example. */
static int impl_send_command(void *object, const struct spa_command *command)
{
	return 0;
}

/* Register a node-events listener and replay the current output-port
 * info to it only (isolate/join keeps other listeners quiet). */
static int impl_add_listener(void *object,
		struct spa_hook *listener,
		const struct spa_node_events *events,
		void *data)
{
	struct data *d = object;
	struct spa_hook_list save;
	uint64_t old;

	spa_hook_list_isolate(&d->hooks, &save, listener, events, data);

	old = d->info.change_mask;
	d->info.change_mask = d->info_all;
	spa_node_emit_port_info(&d->hooks, SPA_DIRECTION_OUTPUT, 0, &d->info);
	d->info.change_mask = old;

	spa_hook_list_join(&d->hooks, &save);
	return 0;
}

/* No node callbacks are used. */
static int impl_set_callbacks(void *object,
		const struct spa_node_callbacks *callbacks, void *data)
{
	return 0;
}

/* No node-level IO areas are used. */
static int impl_set_io(void *object,
		uint32_t id, void *data, size_t size)
{
	return 0;
}

/* Attach/detach the shared IO areas for the single output port. */
static int impl_port_set_io(void *object, enum spa_direction direction, uint32_t port_id,
		uint32_t id, void *data, size_t size)
{
	struct data *d = object;

	switch (id) {
	case SPA_IO_Buffers:
		d->io = data;
		break;
	case SPA_IO_Notify:
		d->io_notify = data;
		d->io_notify_size = size;
		break;
	default:
		return -ENOENT;
	}
	return 0;
}
+
/* Enumerate output-port parameters. Each candidate pod is built in a
 * scratch buffer, filtered against the caller's filter, and skipped
 * (goto next) when rejected. */
static int impl_port_enum_params(void *object, int seq,
			enum spa_direction direction, uint32_t port_id,
			uint32_t id, uint32_t start, uint32_t num,
			const struct spa_pod *filter)
{
	struct data *d = object;
	struct spa_pod *param;
	struct spa_pod_builder b = { 0 };
	uint8_t buffer[1024];
	struct spa_result_node_params result;
	uint32_t count = 0;

	result.id = id;
	result.next = start;
      next:
	result.index = result.next++;

	spa_pod_builder_init(&b, buffer, sizeof(buffer));

	switch (id) {
	case SPA_PARAM_EnumFormat:
		if (result.index != 0)
			return 0;

		/* Offer raw audio; first Id in the enum choice is the
		 * preferred/default value, the rest are alternatives. */
		param = spa_pod_builder_add_object(&b,
			SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
			SPA_FORMAT_mediaType,       SPA_POD_Id(SPA_MEDIA_TYPE_audio),
			SPA_FORMAT_mediaSubtype,    SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
			SPA_FORMAT_AUDIO_format,    SPA_POD_CHOICE_ENUM_Id(5,
							SPA_AUDIO_FORMAT_S16,
							SPA_AUDIO_FORMAT_S16P,
							SPA_AUDIO_FORMAT_S16,
							SPA_AUDIO_FORMAT_F32P,
							SPA_AUDIO_FORMAT_F32),
			SPA_FORMAT_AUDIO_channels,  SPA_POD_CHOICE_RANGE_Int(2, 1, INT32_MAX),
			SPA_FORMAT_AUDIO_rate,      SPA_POD_CHOICE_RANGE_Int(44100, 1, INT32_MAX));
		break;

	case SPA_PARAM_Format:
		/* Only available once a format has been negotiated. */
		if (result.index != 0)
			return 0;
		if (d->format.format == 0)
			return 0;
		param = spa_format_audio_raw_build(&b, id, &d->format);
		break;

	case SPA_PARAM_Buffers:
		if (result.index > 0)
			return 0;

		param = spa_pod_builder_add_object(&b,
			SPA_TYPE_OBJECT_ParamBuffers, id,
			SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(1, 1, 32),
			SPA_PARAM_BUFFERS_blocks,  SPA_POD_Int(1),
			SPA_PARAM_BUFFERS_size,    SPA_POD_CHOICE_RANGE_Int(
							BUFFER_SAMPLES * sizeof(float), 32, INT32_MAX),
			SPA_PARAM_BUFFERS_stride,  SPA_POD_Int(sizeof(float)));
		break;

	case SPA_PARAM_Meta:
		/* Only the buffer header metadata is supported. */
		switch (result.index) {
		case 0:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamMeta, id,
				SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
				SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
			break;
		default:
			return 0;
		}
		break;
	case SPA_PARAM_IO:
		/* IO areas this port accepts in port_set_io(). */
		switch (result.index) {
		case 0:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamIO, id,
				SPA_PARAM_IO_id,   SPA_POD_Id(SPA_IO_Buffers),
				SPA_PARAM_IO_size, SPA_POD_Int(sizeof(struct spa_io_buffers)));
			break;
		case 1:
			param = spa_pod_builder_add_object(&b,
				SPA_TYPE_OBJECT_ParamIO, id,
				SPA_PARAM_IO_id,   SPA_POD_Id(SPA_IO_Notify),
				SPA_PARAM_IO_size, SPA_POD_Int(sizeof(struct spa_io_sequence) + 1024));
			break;
		default:
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	/* Filtered out: try the next index. */
	if (spa_pod_filter(&b, &result.param, param, filter) < 0)
		goto next;

	spa_node_emit_result(&d->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result);

	if (++count != num)
		goto next;

	return 0;
}
+
/* Apply (or clear, when @format is NULL) the negotiated audio format on
 * the output port and announce the changed param availability.
 * NOTE(review): unlike the sink example, change_mask is not reset to 0
 * after the emit — confirm whether the stale mask matters to callers. */
static int port_set_format(void *object,
			enum spa_direction direction, uint32_t port_id,
			uint32_t flags, const struct spa_pod *format)
{
	struct data *d = object;

	if (format == NULL) {
		spa_zero(d->format);
	} else {
		spa_debug_format(0, NULL, format);

		if (spa_format_audio_raw_parse(format, &d->format) < 0)
			return -EINVAL;

		/* Only interleaved S16 and F32 can be generated below. */
		if (d->format.format != SPA_AUDIO_FORMAT_S16 &&
		    d->format.format != SPA_AUDIO_FORMAT_F32)
			return -EINVAL;
		if (d->format.rate == 0 ||
		    d->format.channels == 0 ||
		    d->format.channels > SPA_AUDIO_MAX_CHANNELS)
			return -EINVAL;
	}

	d->info.change_mask = SPA_PORT_CHANGE_MASK_PARAMS;
	if (format) {
		d->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_READWRITE);
		d->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ);
	} else {
		d->params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
		d->params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0);
	}
	spa_node_emit_port_info(&d->hooks, SPA_DIRECTION_OUTPUT, 0, &d->info);

	return 0;
}
+
+static int impl_port_set_param(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t id, uint32_t flags,
+ const struct spa_pod *param)
+{
+ if (id == SPA_PARAM_Format) {
+ return port_set_format(object, direction, port_id, flags, param);
+ }
+ else
+ return -ENOENT;
+}
+
+static int impl_port_use_buffers(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t flags,
+ struct spa_buffer **buffers, uint32_t n_buffers)
+{
+ struct data *d = object;
+ uint32_t i;
+
+ if (n_buffers > MAX_BUFFERS)
+ return -ENOSPC;
+
+ for (i = 0; i < n_buffers; i++) {
+ struct buffer *b = &d->buffers[i];
+ struct spa_data *datas = buffers[i]->datas;
+
+ if (datas[0].data != NULL) {
+ b->ptr = datas[0].data;
+ b->mapped = false;
+ }
+ else if (datas[0].type == SPA_DATA_MemFd ||
+ datas[0].type == SPA_DATA_DmaBuf) {
+ b->ptr = mmap(NULL, datas[0].maxsize + datas[0].mapoffset, PROT_WRITE,
+ MAP_SHARED, datas[0].fd, 0);
+ if (b->ptr == MAP_FAILED) {
+ pw_log_error("failed to buffer mem");
+ return -errno;
+
+ }
+ b->ptr = SPA_PTROFF(b->ptr, datas[0].mapoffset, void);
+ b->mapped = true;
+ }
+ else {
+ pw_log_error("invalid buffer mem");
+ return -EINVAL;
+ }
+ b->id = i;
+ b->buffer = buffers[i];
+ pw_log_debug("got buffer %d size %d", i, datas[0].maxsize);
+ spa_list_append(&d->empty, &b->link);
+ }
+ d->n_buffers = n_buffers;
+ return 0;
+}
+
/* Return a consumed buffer to the empty list so it can be refilled. */
static inline void reuse_buffer(struct data *d, uint32_t id)
{
	pw_log_trace("export-source %p: recycle buffer %d", d, id);
	spa_list_append(&d->empty, &d->buffers[id].link);
}

/* Node method wrapper: the peer is done with buffer_id. */
static int impl_port_reuse_buffer(void *object, uint32_t port_id, uint32_t buffer_id)
{
	struct data *d = object;
	reuse_buffer(d, buffer_id);
	return 0;
}
+
+static void fill_f32(struct data *d, void *dest, int avail)
+{
+ float *dst = dest;
+ int n_samples = avail / (sizeof(float) * d->format.channels);
+ int i;
+ uint32_t c;
+
+ for (i = 0; i < n_samples; i++) {
+ float val;
+
+ d->accumulator += M_PI_M2 * 440 / d->format.rate;
+ if (d->accumulator >= M_PI_M2)
+ d->accumulator -= M_PI_M2;
+
+ val = sin(d->accumulator);
+
+ for (c = 0; c < d->format.channels; c++)
+ *dst++ = val;
+ }
+}
+
+static void fill_s16(struct data *d, void *dest, int avail)
+{
+ int16_t *dst = dest;
+ int n_samples = avail / (sizeof(int16_t) * d->format.channels);
+ int i;
+ uint32_t c;
+
+ for (i = 0; i < n_samples; i++) {
+ int16_t val;
+
+ d->accumulator += M_PI_M2 * 440 / d->format.rate;
+ if (d->accumulator >= M_PI_M2)
+ d->accumulator -= M_PI_M2;
+
+ val = (int16_t) (sin(d->accumulator) * 32767.0);
+
+ for (c = 0; c < d->format.channels; c++)
+ *dst++ = val;
+ }
+}
+
/* Data-path entry point: recycle the previously consumed buffer, take
 * an empty one, fill it with the test tone in the negotiated format and
 * hand it to the peer via the io area. */
static int impl_node_process(void *object)
{
	struct data *d = object;
	struct buffer *b;
	int avail;
	struct spa_io_buffers *io = d->io;
	uint32_t maxsize, index = 0;
	uint32_t filled, offset;
	struct spa_data *od;

	/* The peer is done with the buffer it left in the io area. */
	if (io->buffer_id < d->n_buffers) {
		reuse_buffer(d, io->buffer_id);
		io->buffer_id = SPA_ID_INVALID;
	}
	if (spa_list_is_empty(&d->empty)) {
		pw_log_error("export-source %p: out of buffers", d);
		return -EPIPE;
	}
	b = spa_list_first(&d->empty, struct buffer, link);
	spa_list_remove(&b->link);

	od = b->buffer->datas;

	maxsize = od[0].maxsize;

	/* filled/index are always 0 here (leftover ring-buffer style code);
	 * effectively the whole buffer is written from offset 0. */
	filled = 0;
	index = 0;
	avail = maxsize - filled;
	offset = index % maxsize;

	if (offset + avail > maxsize)
		avail = maxsize - offset;

	if (d->format.format == SPA_AUDIO_FORMAT_S16)
		fill_s16(d, SPA_PTROFF(b->ptr, offset, void), avail);
	else if (d->format.format == SPA_AUDIO_FORMAT_F32)
		fill_f32(d, SPA_PTROFF(b->ptr, offset, void), avail);

	od[0].chunk->offset = 0;
	od[0].chunk->size = avail;
	od[0].chunk->stride = 0;

	io->buffer_id = b->id;
	io->status = SPA_STATUS_HAVE_DATA;

	update_volume(d);

	return SPA_STATUS_HAVE_DATA;
}
+
/* Method table of the exported source node. */
static const struct spa_node_methods impl_node = {
	SPA_VERSION_NODE_METHODS,
	.add_listener = impl_add_listener,
	.set_callbacks = impl_set_callbacks,
	.set_io = impl_set_io,
	.send_command = impl_send_command,
	.port_set_io = impl_port_set_io,
	.port_enum_params = impl_port_enum_params,
	.port_set_param = impl_port_set_param,
	.port_use_buffers = impl_port_use_buffers,
	.port_reuse_buffer = impl_port_reuse_buffer,
	.process = impl_node_process,
};
+
/* Wrap the local spa_node in an interface and export it to the daemon
 * as an auto-connecting, exclusive music playback stream. */
static void make_node(struct data *data)
{
	struct pw_properties *props;

	props = pw_properties_new(PW_KEY_NODE_AUTOCONNECT, "true",
			PW_KEY_NODE_EXCLUSIVE, "true",
			PW_KEY_MEDIA_TYPE, "Audio",
			PW_KEY_MEDIA_CATEGORY, "Playback",
			PW_KEY_MEDIA_ROLE, "Music",
			NULL);
	if (data->path)
		/* connect to the object named on the command line */
		pw_properties_set(props, PW_KEY_TARGET_OBJECT, data->path);

	data->impl_node.iface = SPA_INTERFACE_INIT(
			SPA_TYPE_INTERFACE_Node,
			SPA_VERSION_NODE,
			&impl_node, data);
	pw_core_export(data->core, SPA_TYPE_INTERFACE_Node,
			&props->dict, &data->impl_node, 0);
	pw_properties_free(props);
}
+
/* Log core errors; any error on the core object ends the example. */
static void on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
	struct data *d = data;

	pw_log_error("error id:%u seq:%d res:%d (%s): %s",
			id, seq, res, spa_strerror(res), message);

	if (id == PW_ID_CORE)
		pw_main_loop_quit(d->loop);
}

/* Only the error event is handled. */
static const struct pw_core_events core_events = {
	PW_VERSION_CORE_EVENTS,
	.error = on_core_error,
};
+
/* Entry point: set up PipeWire, declare the output port's static info
 * and supported params, export the node, and run until quit. */
int main(int argc, char *argv[])
{
	struct data data = { 0, };

	pw_init(&argc, &argv);

	data.loop = pw_main_loop_new(NULL);
	data.context = pw_context_new(pw_main_loop_get_loop(data.loop), NULL, 0);
	data.path = argc > 1 ? argv[1] : NULL;

	/* Static port description: params 3/4 (Format/Buffers) are later
	 * flipped by port_set_format() when a format is (un)set. */
	data.info_all = SPA_PORT_CHANGE_MASK_FLAGS |
		SPA_PORT_CHANGE_MASK_PROPS |
		SPA_PORT_CHANGE_MASK_PARAMS;
	data.info = SPA_PORT_INFO_INIT();
	data.info.flags = 0;
	data.items[0] = SPA_DICT_ITEM_INIT(PW_KEY_FORMAT_DSP, "32 bit float mono audio");
	data.dict = SPA_DICT_INIT_ARRAY(data.items);
	data.info.props = &data.dict;
	data.params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ);
	data.params[1] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ);
	data.params[2] = SPA_PARAM_INFO(SPA_PARAM_IO, SPA_PARAM_INFO_READ);
	data.params[3] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
	data.params[4] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0);
	data.info.params = data.params;
	data.info.n_params = 5;

	spa_list_init(&data.empty);
	spa_hook_list_init(&data.hooks);

	if ((data.core = pw_context_connect(data.context, NULL, 0)) == NULL) {
		printf("can't connect: %m\n");
		return -1;
	}

	pw_core_add_listener(data.core, &data.core_listener, &core_events, &data);

	make_node(&data);

	pw_main_loop_run(data.loop);

	pw_context_destroy(data.context);
	pw_main_loop_destroy(data.loop);

	return 0;
}
diff --git a/src/examples/export-spa-device.c b/src/examples/export-spa-device.c
new file mode 100644
index 0000000..4629a02
--- /dev/null
+++ b/src/examples/export-spa-device.c
@@ -0,0 +1,144 @@
+/* PipeWire
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Exporting and loading a SPA device, using \ref api_pw_core.
+ [title]
+ */
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+
+#include <pipewire/impl.h>
+
+struct data {
+ struct pw_main_loop *loop;
+
+ struct pw_context *context;
+
+ struct pw_core *core;
+ struct spa_hook core_listener;
+
+ struct pw_impl_device *device;
+ const char *library;
+ const char *factory;
+ const char *path;
+};
+
+/* Create a SPA device object through the "spa-device-factory" (loaded in
+ * main()) and export it to the connected core.
+ *
+ * Returns 0 on success or a negative errno-style value on failure. */
+static int make_device(struct data *data)
+{
+ struct pw_impl_factory *factory;
+ struct pw_properties *props;
+
+ factory = pw_context_find_factory(data->context, "spa-device-factory");
+ if (factory == NULL)
+ return -ENOENT;
+
+ props = pw_properties_new(SPA_KEY_LIBRARY_NAME, data->library,
+ SPA_KEY_FACTORY_NAME, data->factory, NULL);
+ if (props == NULL)
+ return -errno;
+
+ /* the factory consumes the properties, even on failure */
+ data->device = pw_impl_factory_create_object(factory,
+ NULL,
+ PW_TYPE_INTERFACE_Device,
+ PW_VERSION_DEVICE,
+ props, SPA_ID_INVALID);
+ if (data->device == NULL)
+ return -errno;
+
+ pw_core_export(data->core, SPA_TYPE_INTERFACE_Device, NULL,
+ pw_impl_device_get_implementation(data->device), 0);
+
+ return 0;
+}
+
+/* Core error handler: log the error and stop the main loop when the
+ * error is raised on the core object itself (unrecoverable). */
+static void on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
+{
+ struct data *d = data;
+
+ pw_log_error("error id:%u seq:%d res:%d (%s): %s",
+ id, seq, res, spa_strerror(res), message);
+
+ if (id == PW_ID_CORE)
+ pw_main_loop_quit(d->loop);
+}
+
+/* core event vtable; only errors are handled */
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .error = on_core_error,
+};
+
+/* SIGINT/SIGTERM handler: request a clean shutdown of the main loop */
+static void do_quit(void *data, int signal_number)
+{
+ struct data *d = data;
+ pw_main_loop_quit(d->loop);
+}
+
+/* Entry point: parse <library> <factory> from the command line, load the
+ * spa-device-factory module, connect to the daemon and export the device. */
+int main(int argc, char *argv[])
+{
+ struct data data = { 0, };
+ struct pw_loop *l;
+
+ pw_init(&argc, &argv);
+
+ if (argc < 3) {
+ fprintf(stderr, "usage: %s <library> <factory>\n\n"
+ "\texample: %s v4l2/libspa-v4l2 api.v4l2.device\n\n",
+ argv[0], argv[0]);
+ return -1;
+ }
+
+ data.loop = pw_main_loop_new(NULL);
+ l = pw_main_loop_get_loop(data.loop);
+ /* quit cleanly on SIGINT/SIGTERM */
+ pw_loop_add_signal(l, SIGINT, do_quit, &data);
+ pw_loop_add_signal(l, SIGTERM, do_quit, &data);
+ data.context = pw_context_new(l, NULL, 0);
+ data.library = argv[1];
+ data.factory = argv[2];
+
+ /* provides the "spa-device-factory" used by make_device() */
+ pw_context_load_module(data.context, "libpipewire-module-spa-device-factory", NULL, NULL);
+
+ data.core = pw_context_connect(data.context, NULL, 0);
+ if (data.core == NULL) {
+ pw_log_error("can't connect %m");
+ return -1;
+ }
+
+ pw_core_add_listener(data.core, &data.core_listener, &core_events, &data);
+
+ if (make_device(&data) < 0) {
+ pw_log_error("can't make device");
+ return -1;
+ }
+
+ pw_main_loop_run(data.loop);
+
+ pw_context_destroy(data.context);
+ pw_main_loop_destroy(data.loop);
+
+ return 0;
+}
diff --git a/src/examples/export-spa.c b/src/examples/export-spa.c
new file mode 100644
index 0000000..e2f27cf
--- /dev/null
+++ b/src/examples/export-spa.c
@@ -0,0 +1,183 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Exporting and loading a SPA node, using \ref api_pw_core.
+ [title]
+ */
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+
+#include <pipewire/impl.h>
+
+struct data {
+ struct pw_main_loop *loop;
+
+ struct pw_context *context;
+
+ struct pw_core *core;
+ struct spa_hook core_listener;
+
+ struct spa_node *node;
+ const char *library;
+ const char *factory;
+ const char *path;
+
+ struct pw_proxy *proxy;
+ struct spa_hook proxy_listener;
+ uint32_t id;
+};
+
+/* Called when the exported node is bound to a global id; print the id
+ * once (and again only if it ever changes). */
+static void proxy_event_bound(void *_data, uint32_t global_id)
+{
+ struct data *data = _data;
+ if (data->id != global_id) {
+ printf("node id: %u\n", global_id);
+ data->id = global_id;
+ }
+}
+
+/* proxy event vtable; only the bound event is of interest */
+static const struct pw_proxy_events proxy_events = {
+ PW_VERSION_PROXY_EVENTS,
+ .bound = proxy_event_bound,
+};
+
+/* Load the SPA plugin named on the command line, obtain its node
+ * interface and export it to the connected core.
+ *
+ * Returns 0 on success or a negative errno-style value on failure;
+ * the properties are released on every path. */
+static int make_node(struct data *data)
+{
+ struct pw_properties *props;
+ struct spa_handle *hndl;
+ void *iface;
+ int res;
+
+ props = pw_properties_new(SPA_KEY_LIBRARY_NAME, data->library,
+ SPA_KEY_FACTORY_NAME, data->factory,
+ NULL);
+ if (props == NULL)
+ return -errno;
+
+ hndl = pw_context_load_spa_handle(data->context, data->factory, &props->dict);
+ if (hndl == NULL) {
+ res = -errno;
+ goto error_free_props;
+ }
+
+ if ((res = spa_handle_get_interface(hndl, SPA_TYPE_INTERFACE_Node, &iface)) < 0)
+ goto error_free_props;
+
+ data->node = iface;
+
+ /* optionally autoconnect the exported node to a target object */
+ if (data->path) {
+ pw_properties_set(props, PW_KEY_NODE_AUTOCONNECT, "true");
+ pw_properties_set(props, PW_KEY_TARGET_OBJECT, data->path);
+ }
+
+ data->proxy = pw_core_export(data->core,
+ SPA_TYPE_INTERFACE_Node, &props->dict,
+ data->node, 0);
+ pw_properties_free(props);
+
+ if (data->proxy == NULL)
+ return -errno;
+
+ pw_proxy_add_listener(data->proxy,
+ &data->proxy_listener, &proxy_events, data);
+
+ return 0;
+
+error_free_props:
+ /* the original leaked props on these error paths */
+ pw_properties_free(props);
+ return res;
+}
+
+/* Core error handler: log the error and stop the main loop when the
+ * error is raised on the core object itself (unrecoverable). */
+static void on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
+{
+ struct data *d = data;
+
+ pw_log_error("error id:%u seq:%d res:%d (%s): %s",
+ id, seq, res, spa_strerror(res), message);
+
+ if (id == PW_ID_CORE)
+ pw_main_loop_quit(d->loop);
+}
+
+/* core event vtable; only errors are handled */
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .error = on_core_error,
+};
+
+/* SIGINT/SIGTERM handler: request a clean shutdown of the main loop */
+static void do_quit(void *data, int signal_number)
+{
+ struct data *d = data;
+ pw_main_loop_quit(d->loop);
+}
+
+/* Entry point: parse <library> <factory> [path], load the
+ * spa-node-factory module, connect to the daemon and export the node. */
+int main(int argc, char *argv[])
+{
+ struct data data = { 0, };
+ struct pw_loop *l;
+
+ pw_init(&argc, &argv);
+
+ if (argc < 3) {
+ fprintf(stderr, "usage: %s <library> <factory> [path]\n\n"
+ "\texample: %s v4l2/libspa-v4l2 api.v4l2.source\n\n",
+ argv[0], argv[0]);
+ return -1;
+ }
+
+ data.loop = pw_main_loop_new(NULL);
+ l = pw_main_loop_get_loop(data.loop);
+ /* quit cleanly on SIGINT/SIGTERM */
+ pw_loop_add_signal(l, SIGINT, do_quit, &data);
+ pw_loop_add_signal(l, SIGTERM, do_quit, &data);
+ data.context = pw_context_new(l, NULL, 0);
+ data.library = argv[1];
+ data.factory = argv[2];
+ if (argc > 3)
+ data.path = argv[3];
+
+ pw_context_load_module(data.context, "libpipewire-module-spa-node-factory", NULL, NULL);
+
+ data.core = pw_context_connect(data.context, NULL, 0);
+ if (data.core == NULL) {
+ printf("can't connect: %m\n");
+ return -1;
+ }
+ pw_core_add_listener(data.core,
+ &data.core_listener,
+ &core_events, &data);
+
+ if (make_node(&data) < 0) {
+ pw_log_error("can't make node");
+ return -1;
+ }
+
+ pw_main_loop_run(data.loop);
+
+ /* teardown in reverse order of creation */
+ pw_proxy_destroy(data.proxy);
+ pw_core_disconnect(data.core);
+ pw_context_destroy(data.context);
+ pw_main_loop_destroy(data.loop);
+
+ return 0;
+}
diff --git a/src/examples/local-v4l2.c b/src/examples/local-v4l2.c
new file mode 100644
index 0000000..1d70e9e
--- /dev/null
+++ b/src/examples/local-v4l2.c
@@ -0,0 +1,469 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Using libspa-v4l2
+ [title]
+ */
+
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+#define BPP 3
+#define MAX_BUFFERS 32
+
+#include "sdl.h"
+
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/pod/filter.h>
+#include <spa/node/io.h>
+#include <spa/node/utils.h>
+#include <spa/debug/format.h>
+#include <spa/utils/names.h>
+
+#include <pipewire/impl.h>
+
+/* Application state: the SDL sink window, the PipeWire context, the
+ * locally implemented SPA sink node and the proxies for the created
+ * v4l2 source node and the link between them. */
+struct data {
+ /* SDL rendering state */
+ SDL_Renderer *renderer;
+ SDL_Window *window;
+ SDL_Texture *texture;
+
+ struct pw_main_loop *loop;
+
+ struct pw_context *context;
+ struct pw_core *core;
+
+ /* info and params advertised by the local input port */
+ struct spa_port_info info;
+ struct spa_param_info params[4];
+
+ /* our in-process SPA node implementation and its io area */
+ struct spa_node impl_node;
+ struct spa_io_buffers *io;
+
+ struct spa_hook_list hooks;
+
+ /* negotiated video format and texture stride */
+ struct spa_video_info_raw format;
+ int32_t stride;
+
+ struct spa_buffer *buffers[MAX_BUFFERS];
+ int n_buffers;
+
+ /* proxies: v4l2 source, our exported sink, and the link */
+ struct pw_proxy *out, *in, *link;
+};
+
+/* Drain the SDL event queue; quit the main loop when the window closes. */
+static void handle_events(struct data *data)
+{
+ SDL_Event event;
+ while (SDL_PollEvent(&event)) {
+ switch (event.type) {
+ case SDL_QUIT:
+ pw_main_loop_quit(data->loop);
+ break;
+ }
+ }
+}
+
+/* node-level set_io: nothing to do for this example */
+static int impl_set_io(void *object, uint32_t id, void *data, size_t size)
+{
+ return 0;
+}
+
+/* commands (Start/Pause/...) are accepted but ignored */
+static int impl_send_command(void *object, const struct spa_command *command)
+{
+ return 0;
+}
+
+/* Register a listener and immediately emit the current port info to it,
+ * without re-notifying previously registered listeners (isolate/join). */
+static int impl_add_listener(void *object,
+ struct spa_hook *listener,
+ const struct spa_node_events *events,
+ void *data)
+{
+ struct data *d = object;
+ struct spa_hook_list save;
+
+ spa_hook_list_isolate(&d->hooks, &save, listener, events, data);
+
+ spa_node_emit_port_info(&d->hooks, SPA_DIRECTION_INPUT, 0, &d->info);
+
+ spa_hook_list_join(&d->hooks, &save);
+
+ return 0;
+}
+
+/* no callbacks needed by this node */
+static int impl_set_callbacks(void *object,
+ const struct spa_node_callbacks *callbacks, void *data)
+{
+ return 0;
+}
+
+/* Store the io area used to exchange buffers with the graph; only
+ * SPA_IO_Buffers is supported. */
+static int impl_port_set_io(void *object, enum spa_direction direction, uint32_t port_id,
+ uint32_t id, void *data, size_t size)
+{
+ struct data *d = object;
+
+ if (id == SPA_IO_Buffers)
+ d->io = data;
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+/* Enumerate the params of our single input port. Supported ids are
+ * EnumFormat (formats the SDL renderer can display), Buffers (sized from
+ * the negotiated format) and Meta. Results that do not pass the filter
+ * are skipped; up to `num` results are emitted starting at `start`. */
+static int impl_port_enum_params(void *object, int seq,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t id, uint32_t start, uint32_t num,
+ const struct spa_pod *filter)
+{
+ struct data *d = object;
+ struct spa_pod *param;
+ struct spa_pod_builder b = { 0 };
+ uint8_t buffer[1024];
+ struct spa_result_node_params result;
+ uint32_t count = 0;
+
+ result.id = id;
+ result.next = start;
+ next:
+ result.index = result.next++;
+
+ /* the builder is reset for each candidate param */
+ spa_pod_builder_init(&b, buffer, sizeof(buffer));
+
+ switch (id) {
+ case SPA_PARAM_EnumFormat:
+ {
+ SDL_RendererInfo info;
+
+ if (result.index > 0)
+ return 0;
+
+ SDL_GetRendererInfo(d->renderer, &info);
+ param = sdl_build_formats(&info, &b);
+ break;
+ }
+ case SPA_PARAM_Buffers:
+ if (result.index > 0)
+ return 0;
+
+ /* buffer size depends on the format negotiated earlier */
+ param = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamBuffers, id,
+ SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(2, 1, 32),
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(d->stride * d->format.size.height),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(d->stride));
+ break;
+
+ case SPA_PARAM_Meta:
+ if (result.index > 0)
+ return 0;
+
+ param = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, id,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
+ SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ /* skip candidates that do not match the caller's filter */
+ if (spa_pod_filter(&b, &result.param, param, filter) < 0)
+ goto next;
+
+ spa_node_emit_result(&d->hooks, seq, 0, SPA_RESULT_TYPE_NODE_PARAMS, &result);
+
+ if (++count != num)
+ goto next;
+
+ return 0;
+}
+
+/* Apply (or clear, when format is NULL) the negotiated video format:
+ * create/destroy the SDL texture, remember the stride, and re-advertise
+ * the port params whose availability changed with the format. */
+static int port_set_format(void *object, enum spa_direction direction, uint32_t port_id,
+ uint32_t flags, const struct spa_pod *format)
+{
+ struct data *d = object;
+ Uint32 sdl_format;
+ void *dest;
+
+ if (format == NULL) {
+ spa_zero(d->format);
+ SDL_DestroyTexture(d->texture);
+ d->texture = NULL;
+ } else {
+ spa_debug_format(0, NULL, format);
+
+ spa_format_video_raw_parse(format, &d->format);
+
+ sdl_format = id_to_sdl_format(d->format.format);
+ if (sdl_format == SDL_PIXELFORMAT_UNKNOWN)
+ return -EINVAL;
+ if (d->format.size.width == 0 ||
+ d->format.size.height == 0)
+ return -EINVAL;
+
+ d->texture = SDL_CreateTexture(d->renderer,
+ sdl_format,
+ SDL_TEXTUREACCESS_STREAMING,
+ d->format.size.width,
+ d->format.size.height);
+ /* lock/unlock once only to learn the texture stride */
+ SDL_LockTexture(d->texture, NULL, &dest, &d->stride);
+ SDL_UnlockTexture(d->texture);
+ }
+
+ /* with a format set, Format becomes readable and Buffers available */
+ d->info.change_mask = SPA_PORT_CHANGE_MASK_PARAMS;
+ if (format) {
+ d->params[1] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_READWRITE);
+ d->params[2] = SPA_PARAM_INFO(SPA_PARAM_Buffers, SPA_PARAM_INFO_READ);
+ } else {
+ d->params[1] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
+ d->params[2] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0);
+ }
+ spa_node_emit_port_info(&d->hooks, SPA_DIRECTION_INPUT, 0, &d->info);
+
+ return 0;
+}
+
+/* only the Format param can be set on this port */
+static int impl_port_set_param(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t id, uint32_t flags,
+ const struct spa_pod *param)
+{
+ if (id == SPA_PARAM_Format) {
+ return port_set_format(object, direction, port_id, flags, param);
+ }
+ else
+ return -ENOENT;
+}
+
+/* Remember the buffers allocated for our port; they stay owned by the
+ * caller, we only keep pointers to them. */
+static int impl_port_use_buffers(void *object,
+ enum spa_direction direction, uint32_t port_id,
+ uint32_t flags, struct spa_buffer **buffers, uint32_t n_buffers)
+{
+ struct data *d = object;
+ uint32_t i;
+
+ if (n_buffers > MAX_BUFFERS)
+ return -ENOSPC;
+
+ for (i = 0; i < n_buffers; i++)
+ d->buffers[i] = buffers[i];
+ d->n_buffers = n_buffers;
+ return 0;
+}
+
+/* Render the current buffer (selected by io->buffer_id) to the SDL
+ * texture; invoked on the main loop from impl_node_process().
+ * MemFd/DmaBuf data is mapped read-only for the copy, MemPtr is used
+ * directly. Returns 0 on success or a negative errno-style value. */
+static int do_render(struct spa_loop *loop, bool async, uint32_t seq,
+ const void *_data, size_t size, void *user_data)
+{
+ struct data *d = user_data;
+ struct spa_buffer *buf;
+ uint8_t *map;
+ void *sdata, *ddata;
+ int sstride, dstride, ostride;
+ uint32_t i;
+ uint8_t *src, *dst;
+
+ buf = d->buffers[d->io->buffer_id];
+
+ if (buf->datas[0].type == SPA_DATA_MemFd ||
+ buf->datas[0].type == SPA_DATA_DmaBuf) {
+ map = mmap(NULL, buf->datas[0].maxsize + buf->datas[0].mapoffset, PROT_READ,
+ MAP_PRIVATE, buf->datas[0].fd, 0);
+ /* mmap() reports failure with MAP_FAILED, not NULL */
+ if (map == MAP_FAILED)
+ return -errno;
+ sdata = SPA_PTROFF(map, buf->datas[0].mapoffset, uint8_t);
+ } else if (buf->datas[0].type == SPA_DATA_MemPtr) {
+ map = NULL;
+ sdata = buf->datas[0].data;
+ } else
+ return -EINVAL;
+
+ if (SDL_LockTexture(d->texture, NULL, &ddata, &dstride) < 0) {
+ fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+ /* don't leak the mapping on the error path */
+ if (map)
+ munmap(map, buf->datas[0].maxsize + buf->datas[0].mapoffset);
+ return -EIO;
+ }
+ /* copy row by row; strides of source and texture may differ */
+ sstride = buf->datas[0].chunk->stride;
+ ostride = SPA_MIN(sstride, dstride);
+
+ src = sdata;
+ dst = ddata;
+ for (i = 0; i < d->format.size.height; i++) {
+ memcpy(dst, src, ostride);
+ src += sstride;
+ dst += dstride;
+ }
+ SDL_UnlockTexture(d->texture);
+
+ SDL_RenderClear(d->renderer);
+ SDL_RenderCopy(d->renderer, d->texture, NULL, NULL);
+ SDL_RenderPresent(d->renderer);
+
+ if (map)
+ munmap(map, buf->datas[0].maxsize + buf->datas[0].mapoffset);
+
+ return 0;
+}
+
+/* Process callback: hand the rendering over to the main loop (blocking
+ * invoke, since rendering must happen on the SDL/main thread), pump SDL
+ * events and mark the io area as needing new data. */
+static int impl_node_process(void *object)
+{
+ struct data *d = object;
+ int res;
+
+ if ((res = pw_loop_invoke(pw_main_loop_get_loop(d->loop), do_render,
+ SPA_ID_INVALID, NULL, 0, true, d)) < 0)
+ return res;
+
+ handle_events(d);
+
+ d->io->status = SPA_STATUS_NEED_DATA;
+
+ return SPA_STATUS_NEED_DATA;
+}
+
+/* method table of the locally implemented SPA sink node */
+static const struct spa_node_methods impl_node = {
+ SPA_VERSION_NODE_METHODS,
+ .add_listener = impl_add_listener,
+ .set_callbacks = impl_set_callbacks,
+ .set_io = impl_set_io,
+ .send_command = impl_send_command,
+ .port_set_io = impl_port_set_io,
+ .port_enum_params = impl_port_enum_params,
+ .port_set_param = impl_port_set_param,
+ .port_use_buffers = impl_port_use_buffers,
+ .process = impl_node_process,
+};
+
+/* Export our local sink node, create a v4l2 source node through the
+ * spa-node-factory, wait until both have bound ids, then create a link
+ * between them with the link-factory. */
+static int make_nodes(struct data *data)
+{
+ struct pw_properties *props;
+
+ data->impl_node.iface = SPA_INTERFACE_INIT(
+ SPA_TYPE_INTERFACE_Node,
+ SPA_VERSION_NODE,
+ &impl_node, data);
+
+ /* describe our input port before exporting the node */
+ data->info = SPA_PORT_INFO_INIT();
+ data->info.change_mask =
+ SPA_PORT_CHANGE_MASK_FLAGS |
+ SPA_PORT_CHANGE_MASK_PARAMS;
+ data->info.flags = 0;
+ data->params[0] = SPA_PARAM_INFO(SPA_PARAM_EnumFormat, SPA_PARAM_INFO_READ);
+ data->params[1] = SPA_PARAM_INFO(SPA_PARAM_Format, SPA_PARAM_INFO_WRITE);
+ data->params[2] = SPA_PARAM_INFO(SPA_PARAM_Buffers, 0);
+ data->params[3] = SPA_PARAM_INFO(SPA_PARAM_Meta, SPA_PARAM_INFO_READ);
+ data->info.params = data->params;
+ data->info.n_params = SPA_N_ELEMENTS(data->params);
+
+ data->in = pw_core_export(data->core,
+ SPA_TYPE_INTERFACE_Node,
+ NULL,
+ &data->impl_node,
+ 0);
+
+ props = pw_properties_new(
+ SPA_KEY_LIBRARY_NAME, "v4l2/libspa-v4l2",
+ SPA_KEY_FACTORY_NAME, SPA_NAME_API_V4L2_SOURCE,
+ NULL);
+
+ data->out = pw_core_create_object(data->core,
+ "spa-node-factory",
+ PW_TYPE_INTERFACE_Node,
+ PW_VERSION_NODE,
+ &props->dict, 0);
+
+
+ /* iterate the loop until both proxies got a global id; the link
+ * factory needs real node ids below */
+ while (true) {
+
+ if (pw_proxy_get_bound_id(data->out) != SPA_ID_INVALID &&
+ pw_proxy_get_bound_id(data->in) != SPA_ID_INVALID)
+ break;
+
+ pw_loop_iterate(pw_main_loop_get_loop(data->loop), -1);
+ }
+
+ /* reuse the properties object for the link parameters */
+ pw_properties_clear(props);
+
+ pw_properties_setf(props,
+ PW_KEY_LINK_OUTPUT_NODE, "%d", pw_proxy_get_bound_id(data->out));
+ pw_properties_setf(props,
+ PW_KEY_LINK_INPUT_NODE, "%d", pw_proxy_get_bound_id(data->in));
+
+ data->link = pw_core_create_object(data->core,
+ "link-factory",
+ PW_TYPE_INTERFACE_Link,
+ PW_VERSION_LINK,
+ &props->dict, 0);
+
+ pw_properties_free(props);
+
+ return 0;
+}
+
+/* Entry point: set up SDL and a self-connected PipeWire context, load
+ * the node and link factories, build the v4l2 -> SDL pipeline and run. */
+int main(int argc, char *argv[])
+{
+ struct data data = { 0, };
+
+ pw_init(&argc, &argv);
+
+ data.loop = pw_main_loop_new(NULL);
+ data.context = pw_context_new(
+ pw_main_loop_get_loop(data.loop),
+ pw_properties_new(
+ PW_KEY_CORE_DAEMON, "false",
+ NULL), 0);
+
+ spa_hook_list_init(&data.hooks);
+
+ /* factories used by make_nodes() */
+ pw_context_load_module(data.context, "libpipewire-module-spa-node-factory", NULL, NULL);
+ pw_context_load_module(data.context, "libpipewire-module-link-factory", NULL, NULL);
+
+ if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+ printf("can't initialize SDL: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ if (SDL_CreateWindowAndRenderer
+ (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+ printf("can't create window: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ /* connect to a core in this same process, no daemon involved */
+ data.core = pw_context_connect_self(data.context, NULL, 0);
+ if (data.core == NULL) {
+ printf("can't connect to core: %m\n");
+ return -1;
+ }
+
+ make_nodes(&data);
+
+ pw_main_loop_run(data.loop);
+
+ pw_proxy_destroy(data.link);
+ pw_proxy_destroy(data.in);
+ pw_proxy_destroy(data.out);
+ pw_context_destroy(data.context);
+ pw_main_loop_destroy(data.loop);
+ pw_deinit();
+
+ return 0;
+}
diff --git a/src/examples/meson.build b/src/examples/meson.build
new file mode 100644
index 0000000..e2f2600
--- /dev/null
+++ b/src/examples/meson.build
@@ -0,0 +1,51 @@
+# Examples, in order from simple to complicated
+examples = [
+ 'audio-src',
+ 'audio-dsp-src',
+ 'audio-dsp-filter',
+ 'audio-capture',
+ 'video-play',
+ 'video-src',
+ 'video-dsp-play',
+ 'video-play-pull',
+ 'video-play-reneg',
+ 'video-src-alloc',
+ 'video-src-reneg',
+ 'video-src-fixate',
+ 'video-play-fixate',
+ 'export-sink',
+ 'export-source',
+ 'export-spa',
+ 'export-spa-device',
+ 'bluez-session',
+ 'local-v4l2',
+]
+
+# Optional extra dependencies per example; an example is only built when
+# all of its listed dependencies were found.
+examples_extra_deps = {
+ 'video-src-fixate': [drm_dep],
+ 'video-play': [sdl_dep],
+ 'video-play-reneg': [sdl_dep],
+ 'video-play-fixate': [sdl_dep, drm_dep],
+ 'video-play-pull': [sdl_dep],
+ 'video-dsp-play': [sdl_dep],
+ 'local-v4l2': [sdl_dep],
+ 'export-sink': [sdl_dep],
+}
+
+foreach c : examples
+ deps = examples_extra_deps.get(c, [])
+
+ # skip examples with missing optional dependencies
+ found = true
+ foreach dep : deps
+ found = found and dep.found()
+ endforeach
+
+ if found
+ executable(
+ c, c + '.c',
+ install : installed_tests_enabled,
+ install_dir : installed_tests_execdir / 'examples',
+ dependencies : [pipewire_dep, mathlib] + deps,
+ )
+ endif
+endforeach
diff --git a/src/examples/sdl.h b/src/examples/sdl.h
new file mode 100644
index 0000000..74ea74a
--- /dev/null
+++ b/src/examples/sdl.h
@@ -0,0 +1,198 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ SDL2 video format conversions
+ [title]
+ */
+
+#include <SDL2/SDL.h>
+
+#include <spa/utils/type.h>
+#include <spa/pod/builder.h>
+#include <spa/param/video/raw.h>
+#include <spa/param/video/format.h>
+
+/* Mapping between SDL pixel formats and SPA video format ids. The two
+ * branches differ because SPA format names are byte-order based while
+ * several SDL names are host-endian; unsupported entries map to
+ * SPA_VIDEO_FORMAT_UNKNOWN. */
+static struct {
+ Uint32 format;
+ uint32_t id;
+} sdl_video_formats[] = {
+#if SDL_BYTEORDER == SDL_BIG_ENDIAN
+ { SDL_PIXELFORMAT_UNKNOWN, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_UNKNOWN, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1MSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX4LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX4MSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX8, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB332, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGR555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ARGB4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ABGR4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGRA4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ARGB1555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA5551, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ABGR1555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGRA5551, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB565, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGR565, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB24, SPA_VIDEO_FORMAT_RGB,},
+ { SDL_PIXELFORMAT_RGB888, SPA_VIDEO_FORMAT_RGB,},
+ { SDL_PIXELFORMAT_RGBX8888, SPA_VIDEO_FORMAT_RGBx,},
+ { SDL_PIXELFORMAT_BGR24, SPA_VIDEO_FORMAT_BGR,},
+ { SDL_PIXELFORMAT_BGR888, SPA_VIDEO_FORMAT_BGR,},
+ { SDL_PIXELFORMAT_BGRX8888, SPA_VIDEO_FORMAT_BGRx,},
+ { SDL_PIXELFORMAT_ARGB2101010, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA8888, SPA_VIDEO_FORMAT_RGBA,},
+ { SDL_PIXELFORMAT_ARGB8888, SPA_VIDEO_FORMAT_ARGB,},
+ { SDL_PIXELFORMAT_BGRA8888, SPA_VIDEO_FORMAT_BGRA,},
+ { SDL_PIXELFORMAT_ABGR8888, SPA_VIDEO_FORMAT_ABGR,},
+ { SDL_PIXELFORMAT_YV12, SPA_VIDEO_FORMAT_YV12,},
+ { SDL_PIXELFORMAT_IYUV, SPA_VIDEO_FORMAT_I420,},
+ { SDL_PIXELFORMAT_YUY2, SPA_VIDEO_FORMAT_YUY2,},
+ { SDL_PIXELFORMAT_UYVY, SPA_VIDEO_FORMAT_UYVY,},
+ { SDL_PIXELFORMAT_YVYU, SPA_VIDEO_FORMAT_YVYU,},
+#if SDL_VERSION_ATLEAST(2,0,4)
+ { SDL_PIXELFORMAT_NV12, SPA_VIDEO_FORMAT_NV12,},
+ { SDL_PIXELFORMAT_NV21, SPA_VIDEO_FORMAT_NV21,},
+#endif
+#else
+ { SDL_PIXELFORMAT_UNKNOWN, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_UNKNOWN, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX1MSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX4LSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX4MSB, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_INDEX8, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB332, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGR555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ARGB4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ABGR4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGRA4444, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ARGB1555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA5551, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_ABGR1555, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGRA5551, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB565, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_BGR565, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGB24, SPA_VIDEO_FORMAT_BGR,},
+ { SDL_PIXELFORMAT_RGB888, SPA_VIDEO_FORMAT_BGR,},
+ { SDL_PIXELFORMAT_RGBX8888, SPA_VIDEO_FORMAT_xBGR,},
+ { SDL_PIXELFORMAT_BGR24, SPA_VIDEO_FORMAT_RGB,},
+ { SDL_PIXELFORMAT_BGR888, SPA_VIDEO_FORMAT_RGB,},
+ { SDL_PIXELFORMAT_BGRX8888, SPA_VIDEO_FORMAT_xRGB,},
+ { SDL_PIXELFORMAT_ARGB2101010, SPA_VIDEO_FORMAT_UNKNOWN,},
+ { SDL_PIXELFORMAT_RGBA8888, SPA_VIDEO_FORMAT_ABGR,},
+ { SDL_PIXELFORMAT_ARGB8888, SPA_VIDEO_FORMAT_BGRA,},
+ { SDL_PIXELFORMAT_BGRA8888, SPA_VIDEO_FORMAT_ARGB,},
+ { SDL_PIXELFORMAT_ABGR8888, SPA_VIDEO_FORMAT_RGBA,},
+ { SDL_PIXELFORMAT_YV12, SPA_VIDEO_FORMAT_YV12,},
+ { SDL_PIXELFORMAT_IYUV, SPA_VIDEO_FORMAT_I420,},
+ { SDL_PIXELFORMAT_YUY2, SPA_VIDEO_FORMAT_YUY2,},
+ { SDL_PIXELFORMAT_UYVY, SPA_VIDEO_FORMAT_UYVY,},
+ { SDL_PIXELFORMAT_YVYU, SPA_VIDEO_FORMAT_YVYU,},
+#if SDL_VERSION_ATLEAST(2,0,4)
+ { SDL_PIXELFORMAT_NV12, SPA_VIDEO_FORMAT_NV12,},
+ { SDL_PIXELFORMAT_NV21, SPA_VIDEO_FORMAT_NV21,},
+#endif
+#endif
+};
+
+/* Look up the SPA video format for an SDL pixel format; UNKNOWN when
+ * there is no mapping. */
+static inline uint32_t sdl_format_to_id(Uint32 format)
+{
+ SPA_FOR_EACH_ELEMENT_VAR(sdl_video_formats, f) {
+ if (f->format == format)
+ return f->id;
+ }
+ return SPA_VIDEO_FORMAT_UNKNOWN;
+}
+
+/* Look up the SDL pixel format for an SPA video format; UNKNOWN when
+ * there is no mapping. */
+static inline Uint32 id_to_sdl_format(uint32_t id)
+{
+ SPA_FOR_EACH_ELEMENT_VAR(sdl_video_formats, f) {
+ if (f->id == id)
+ return f->format;
+ }
+ return SDL_PIXELFORMAT_UNKNOWN;
+}
+
+/* Build an EnumFormat pod listing the raw video formats the given SDL
+ * renderer can display, plus size and framerate ranges. Returns the
+ * completed pod allocated in the caller's builder. */
+static inline struct spa_pod *sdl_build_formats(SDL_RendererInfo *info, struct spa_pod_builder *b)
+{
+ uint32_t i, c;
+ struct spa_pod_frame f[2];
+
+ /* make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat.
+ * The object type is important because it defines the properties that are
+ * acceptable. The id gives more context about what the object is meant to
+ * contain. In this case we enumerate supported formats. */
+ spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+ /* add media type and media subtype properties */
+ spa_pod_builder_prop(b, SPA_FORMAT_mediaType, 0);
+ spa_pod_builder_id(b, SPA_MEDIA_TYPE_video);
+ spa_pod_builder_prop(b, SPA_FORMAT_mediaSubtype, 0);
+ spa_pod_builder_id(b, SPA_MEDIA_SUBTYPE_raw);
+
+ /* build an enumeration of formats */
+ spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_format, 0);
+ spa_pod_builder_push_choice(b, &f[1], SPA_CHOICE_Enum, 0);
+ /* first the formats supported by the textures; the first entry is
+ * emitted twice because the first value of an Enum choice is the
+ * default */
+ for (i = 0, c = 0; i < info->num_texture_formats; i++) {
+ uint32_t id = sdl_format_to_id(info->texture_formats[i]);
+ if (id == 0)
+ continue;
+ if (c++ == 0)
+ spa_pod_builder_id(b, id);
+ spa_pod_builder_id(b, id);
+ }
+ /* then all the other ones SDL can convert from/to */
+ /* NOTE(review): the loop variable shadows the outer frame array `f`
+ * inside this macro; harmless here but worth confirming with -Wshadow */
+ SPA_FOR_EACH_ELEMENT_VAR(sdl_video_formats, f) {
+ uint32_t id = f->id;
+ if (id != SPA_VIDEO_FORMAT_UNKNOWN)
+ spa_pod_builder_id(b, id);
+ }
+ spa_pod_builder_id(b, SPA_VIDEO_FORMAT_RGBA_F32);
+ spa_pod_builder_pop(b, &f[1]);
+ /* add size and framerate ranges */
+ spa_pod_builder_add(b,
+ SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
+ &SPA_RECTANGLE(WIDTH, HEIGHT),
+ &SPA_RECTANGLE(1,1),
+ &SPA_RECTANGLE(info->max_texture_width,
+ info->max_texture_height)),
+ SPA_FORMAT_VIDEO_framerate, SPA_POD_CHOICE_RANGE_Fraction(
+ &SPA_FRACTION(25,1),
+ &SPA_FRACTION(0,1),
+ &SPA_FRACTION(30,1)),
+ 0);
+ return spa_pod_builder_pop(b, &f[0]);
+}
diff --git a/src/examples/video-dsp-play.c b/src/examples/video-dsp-play.c
new file mode 100644
index 0000000..6068e2a
--- /dev/null
+++ b/src/examples/video-dsp-play.c
@@ -0,0 +1,315 @@
+/* PipeWire
+ *
+ * Copyright © 2019 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video input stream using \ref pw_filter "pw_filter".
+ [title]
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+#include <pipewire/filter.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+#define BPP 3
+
+#define MAX_BUFFERS 64
+
+#include "sdl.h"
+
+/* One pixel of the 32-bit float RGBA DSP video format; component values
+ * are nominally in the range 0.0 .. 1.0 (clamped on conversion). */
+struct pixel {
+	float r, g, b, a;
+};
+
+/* All state of this example, passed to every callback as user data. */
+struct data {
+	const char *target;	/* optional node to connect to, from argv[1] */
+
+	/* SDL output objects */
+	SDL_Renderer *renderer;
+	SDL_Window *window;
+	SDL_Texture *texture;
+	SDL_Texture *cursor;
+
+	struct pw_main_loop *loop;
+
+	struct pw_filter *filter;
+	struct spa_hook filter_listener;
+
+	void *in_port;		/* port data returned by pw_filter_add_port() */
+
+	struct spa_io_position *position;	/* io area set in the io_changed event */
+	struct spa_video_info_dsp format;	/* negotiated DSP video format */
+
+	int counter;
+	SDL_Rect rect;
+	SDL_Rect cursor_rect;
+};
+
+static void handle_events(struct data *data)
+{
+	/* Drain all pending SDL events; a window-close request stops the
+	 * PipeWire main loop. */
+	SDL_Event ev;
+
+	while (SDL_PollEvent(&ev)) {
+		if (ev.type == SDL_QUIT)
+			pw_main_loop_quit(data->loop);
+	}
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_filter_dequeue_buffer(port);
+ *
+ * .. do stuff with buffer ...
+ *
+ * pw_filter_queue_buffer(port, b);
+ */
+static void
+on_process(void *_data, struct spa_io_position *position)
+{
+	struct data *data = _data;
+	struct pw_buffer *b;
+	struct spa_buffer *buf;
+	void *sdata, *ddata;
+	int sstride, dstride;
+	uint32_t i, j;
+	uint8_t *src, *dst;
+
+	b = NULL;
+	/* dequeue everything that is queued and requeue all but the newest
+	 * buffer, so we always render the most recent frame */
+	while (true) {
+		struct pw_buffer *t;
+		if ((t = pw_filter_dequeue_buffer(data->in_port)) == NULL)
+			break;
+		if (b)
+			pw_filter_queue_buffer(data->in_port, b);
+		b = t;
+	}
+	if (b == NULL) {
+		pw_log_warn("out of buffers: %m");
+		return;
+	}
+
+	buf = b->buffer;
+
+	/* the video size comes from the position io area, not from the buffer */
+	pw_log_trace("new buffer %p %dx%d", buf,
+			data->position->video.size.width, data->position->video.size.height);
+
+	handle_events(data);
+
+	if ((sdata = buf->datas[0].data) == NULL) {
+		pw_log_error("no buffer data");
+		goto done;
+	}
+
+	if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+		pw_log_error("Couldn't lock texture: %s", SDL_GetError());
+		goto done;
+	}
+
+	/* copy video image in texture */
+	sstride = buf->datas[0].chunk->stride;
+	if (sstride == 0)
+		sstride = buf->datas[0].chunk->size / data->position->video.size.height;
+
+	src = sdata;
+	dst = ddata;
+
+	/* convert each F32 RGBA pixel to 8-bit RGBA, clamping to 0..255 */
+	for (i = 0; i < data->position->video.size.height; i++) {
+		struct pixel *p = (struct pixel *) src;
+		for (j = 0; j < data->position->video.size.width; j++) {
+			dst[j * 4 + 0] = SPA_CLAMP(p[j].r * 255.0f, 0, 255);
+			dst[j * 4 + 1] = SPA_CLAMP(p[j].g * 255.0f, 0, 255);
+			dst[j * 4 + 2] = SPA_CLAMP(p[j].b * 255.0f, 0, 255);
+			dst[j * 4 + 3] = SPA_CLAMP(p[j].a * 255.0f, 0, 255);
+		}
+		src += sstride;
+		dst += dstride;
+	}
+	SDL_UnlockTexture(data->texture);
+
+	SDL_RenderClear(data->renderer);
+	SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL);
+	SDL_RenderPresent(data->renderer);
+
+      done:
+	/* always give the buffer back to the filter */
+	pw_filter_queue_buffer(data->in_port, b);
+}
+
+static void on_filter_state_changed(void *_data, enum pw_filter_state old,
+				enum pw_filter_state state, const char *error)
+{
+	/* Report every state transition; once the filter becomes unconnected
+	 * there is nothing left to do, so stop the main loop. */
+	struct data *d = _data;
+
+	fprintf(stderr, "filter state: \"%s\"\n", pw_filter_state_as_string(state));
+	if (state == PW_FILTER_STATE_UNCONNECTED)
+		pw_main_loop_quit(d->loop);
+}
+
+static void
+on_filter_io_changed(void *_data, void *port_data, uint32_t id, void *area, uint32_t size)
+{
+	/* Remember the SPA_IO_Position area; process() reads the video size
+	 * from it. Other io areas are ignored. */
+	struct data *d = _data;
+
+	if (id == SPA_IO_Position)
+		d->position = area;
+}
+
+/* Called when a param on the filter changes; we only handle the Format
+ * param here and create the SDL texture matching the negotiated size. */
+static void
+on_filter_param_changed(void *_data, void *port_data, uint32_t id, const struct spa_pod *param)
+{
+	struct data *data = _data;
+	struct pw_filter *filter = data->filter;
+
+	/* NULL means to clear the format */
+	if (param == NULL || id != SPA_PARAM_Format)
+		return;
+
+	/* call a helper function to parse the format for us. */
+	spa_format_video_dsp_parse(param, &data->format);
+
+	/* we only advertised/accept F32 RGBA from the DSP port */
+	if (data->format.format != SPA_VIDEO_FORMAT_RGBA_F32) {
+		pw_filter_set_error(filter, -EINVAL, "unknown format");
+		return;
+	}
+
+	/* size comes from the position io area (set in io_changed) */
+	data->texture = SDL_CreateTexture(data->renderer,
+					  SDL_PIXELFORMAT_RGBA32,
+					  SDL_TEXTUREACCESS_STREAMING,
+					  data->position->video.size.width,
+					  data->position->video.size.height);
+	if (data->texture == NULL) {
+		/* NOTE(review): SDL does not promise to set errno on failure,
+		 * so -errno may be stale here — consider -EINVAL instead. */
+		pw_filter_set_error(filter, -errno, "can't create texture");
+		return;
+	}
+
+	data->rect.x = 0;
+	data->rect.y = 0;
+	data->rect.w = data->position->video.size.width;
+	data->rect.h = data->position->video.size.height;
+}
+
+/* these are the filter events we listen for */
+static const struct pw_filter_events filter_events = {
+	PW_VERSION_FILTER_EVENTS,
+	.state_changed = on_filter_state_changed,	/* quit when unconnected */
+	.io_changed = on_filter_io_changed,		/* grab SPA_IO_Position */
+	.param_changed = on_filter_param_changed,	/* create texture on Format */
+	.process = on_process,				/* render each frame */
+};
+
+int main(int argc, char *argv[])
+{
+	struct data data = { 0, };
+
+	pw_init(&argc, &argv);
+
+	/* create a main loop */
+	data.loop = pw_main_loop_new(NULL);
+
+	/* optional target node to connect to */
+	data.target = argc > 1 ? argv[1] : NULL;
+
+	/* create a simple filter, the simple filter manages to core and remote
+	 * objects for you if you don't need to deal with them
+	 *
+	 * If you plan to autoconnect your filter, you need to provide at least
+	 * media, category and role properties
+	 *
+	 * Pass your events and a user_data pointer as the last arguments. This
+	 * will inform you about the filter state. The most important event
+	 * you need to listen to is the process event where you need to consume
+	 * the data provided to you.
+	 */
+	data.filter = pw_filter_new_simple(
+			pw_main_loop_get_loop(data.loop),
+			"video-dsp-play",
+			pw_properties_new(
+				PW_KEY_MEDIA_TYPE, "Video",
+				PW_KEY_MEDIA_CATEGORY, "Capture",
+				PW_KEY_MEDIA_ROLE, "DSP",
+				/* only autoconnect when a target was given */
+				PW_KEY_NODE_AUTOCONNECT, data.target ? "true" : "false",
+				PW_KEY_TARGET_OBJECT, data.target,
+				PW_KEY_MEDIA_CLASS, "Stream/Input/Video",
+				NULL),
+			&filter_events,
+			&data);
+
+
+	if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+		fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	if (SDL_CreateWindowAndRenderer
+	    (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+		fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	/* Make a new DSP port. This will automatically set up the right
+	 * parameters for the port */
+	data.in_port = pw_filter_add_port(data.filter,
+			PW_DIRECTION_INPUT,
+			PW_FILTER_PORT_FLAG_MAP_BUFFERS,
+			0,
+			pw_properties_new(
+				PW_KEY_FORMAT_DSP, "32 bit float RGBA video",
+				PW_KEY_PORT_NAME, "input",
+				NULL),
+			NULL, 0);
+
+	/* connect with no extra params; the DSP port defines the format */
+	pw_filter_connect(data.filter,
+			0, /* no flags */
+			NULL, 0);
+
+	/* do things until we quit the mainloop */
+	pw_main_loop_run(data.loop);
+
+	/* tear down in reverse order of creation */
+	pw_filter_destroy(data.filter);
+	pw_main_loop_destroy(data.loop);
+
+	SDL_DestroyTexture(data.texture);
+	if (data.cursor)
+		SDL_DestroyTexture(data.cursor);
+	SDL_DestroyRenderer(data.renderer);
+	SDL_DestroyWindow(data.window);
+
+	return 0;
+}
diff --git a/src/examples/video-play-fixate.c b/src/examples/video-play-fixate.c
new file mode 100644
index 0000000..7477c5a
--- /dev/null
+++ b/src/examples/video-play-fixate.c
@@ -0,0 +1,516 @@
+/* PipeWire
+ *
+ * Copyright © 2020 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video input stream using \ref pw_stream "pw_stream", with format fixation.
+ [title]
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <libdrm/drm_fourcc.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+
+#define MAX_BUFFERS 64
+#define MAX_MOD 8
+
+#include "sdl.h"
+
+/* One pixel of F32 RGBA video (used by the DSP path). */
+struct pixel {
+	float r, g, b, a;
+};
+
+/* A "major.minor.micro" PipeWire version, see parse_pw_version(). */
+struct pw_version {
+	int major;
+	int minor;
+	int micro;
+};
+
+/* The DRM modifiers we can offer for one SPA video format. */
+struct modifier_info {
+	uint32_t spa_format;		/* SPA_VIDEO_FORMAT_* this entry is for */
+	uint32_t n_modifiers;		/* number of valid entries in modifiers[] */
+	uint64_t modifiers[MAX_MOD];
+};
+
+/* All state of this example, passed to every callback as user data. */
+struct data {
+	const char *path;	/* optional target node, from argv[1] */
+
+	/* SDL output objects */
+	SDL_Renderer *renderer;
+	SDL_Window *window;
+	SDL_Texture *texture;
+	SDL_Texture *cursor;
+
+	struct pw_main_loop *loop;
+	struct spa_source *reneg;	/* event used to trigger renegotiation */
+
+	struct pw_stream *stream;
+	struct spa_hook stream_listener;
+
+	struct spa_video_info format;	/* negotiated video format */
+	int32_t stride;			/* stride SDL chose for the texture */
+	struct spa_rectangle size;	/* negotiated video size */
+
+	uint32_t n_mod_info;
+	struct modifier_info mod_info[2];	/* modifiers we still offer */
+
+	int counter;
+};
+
+/* Parse a "major.minor.micro" version string.
+ * Fields default to 0 so that a short or malformed string (where sscanf
+ * matches fewer than three numbers) cannot leave them uninitialized —
+ * the previous code read indeterminate values in that case. */
+static struct pw_version parse_pw_version(const char* version) {
+	struct pw_version pw_version = { 0, 0, 0 };
+	if (sscanf(version, "%d.%d.%d", &pw_version.major, &pw_version.minor,
+			&pw_version.micro) != 3)
+		pw_log_warn("can't parse PipeWire version '%s'", version);
+	return pw_version;
+}
+
+/* Return true when the linked PipeWire library is at least major.minor.micro.
+ * The comparison is lexicographic: a newer major (or minor) component makes
+ * the lower-order components irrelevant.  The previous field-by-field
+ * "<=" test wrongly rejected e.g. library 1.0.0 for a 0.3.40 requirement. */
+static bool has_pw_version(int major, int minor, int micro) {
+	struct pw_version pw_version = parse_pw_version(pw_get_library_version());
+	printf("PW Version: %d.%d.%d\n", pw_version.major, pw_version.minor,
+		pw_version.micro);
+	if (pw_version.major != major)
+		return pw_version.major > major;
+	if (pw_version.minor != minor)
+		return pw_version.minor > minor;
+	return pw_version.micro >= micro;
+}
+
+static void init_modifiers(struct data *data)
+{
+	/* Advertise RGB with the linear explicit modifier plus the implicit
+	 * (driver-chosen) modifier. */
+	struct modifier_info *mi = &data->mod_info[0];
+
+	data->n_mod_info = 1;
+	mi->spa_format = SPA_VIDEO_FORMAT_RGB;
+	mi->n_modifiers = 2;
+	mi->modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+	mi->modifiers[1] = DRM_FORMAT_MOD_INVALID;
+}
+
+static void destroy_modifiers(struct data *data)
+{
+	/* Forget all modifiers we were offering. */
+	struct modifier_info *mi = &data->mod_info[0];
+	mi->n_modifiers = 0;
+}
+
+static void strip_modifier(struct data *data, uint32_t spa_format, uint64_t modifier)
+{
+	/* Remove one modifier (or, on old PipeWire, all of them) from the
+	 * list we offer for spa_format, after a failed DmaBuf import. */
+	struct modifier_info *mod_info = &data->mod_info[0];
+	uint32_t kept = 0;
+
+	if (mod_info->spa_format != spa_format)
+		return;
+	/* Dropping of single modifiers is only supported on PipeWire 0.3.40
+	 * and newer.  On older PipeWire just dropping all modifiers might
+	 * work on versions newer than 0.3.33/35. */
+	if (!has_pw_version(0,3,40)) {
+		printf("Dropping all modifiers\n");
+		mod_info->n_modifiers = 0;
+		return;
+	}
+	printf("Dropping a single modifier\n");
+	for (uint32_t i = 0; i < mod_info->n_modifiers; i++) {
+		if (mod_info->modifiers[i] != modifier)
+			mod_info->modifiers[kept++] = mod_info->modifiers[i];
+	}
+	mod_info->n_modifiers = kept;
+}
+
+static void handle_events(struct data *data)
+{
+	/* Drain all pending SDL events; a window-close request stops the
+	 * PipeWire main loop. */
+	SDL_Event ev;
+
+	while (SDL_PollEvent(&ev)) {
+		if (ev.type == SDL_QUIT)
+			pw_main_loop_quit(data->loop);
+	}
+}
+
+/* Build one SPA_PARAM_EnumFormat object for the given video format and,
+ * optionally, a list of DRM modifiers.  Returns the finished pod. */
+static struct spa_pod *build_format(struct spa_pod_builder *b, SDL_RendererInfo *info, enum spa_video_format format,
+		uint64_t *modifiers, int modifier_count)
+{
+	struct spa_pod_frame f[2];
+	int i, c;
+
+	spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+	/* format */
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+	/* modifiers */
+	if (modifier_count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+		// we only support implicit modifiers, use shortpath to skip fixation phase
+		spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY);
+		spa_pod_builder_long(b, modifiers[0]);
+	} else if (modifier_count > 0) {
+		// build an enumeration of modifiers
+		spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
+		spa_pod_builder_push_choice(b, &f[1], SPA_CHOICE_Enum, 0);
+		// modifiers from the array; the first entry of a Choice is the
+		// default, so the first modifier is emitted a second time to
+		// also appear in the list of alternatives
+		for (i = 0, c = 0; i < modifier_count; i++) {
+			spa_pod_builder_long(b, modifiers[i]);
+			if (c++ == 0)
+				spa_pod_builder_long(b, modifiers[i]);
+		}
+		spa_pod_builder_pop(b, &f[1]);
+	}
+	/* default/min/max size range */
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
+		SPA_POD_CHOICE_RANGE_Rectangle(
+			&SPA_RECTANGLE(WIDTH, HEIGHT),
+			&SPA_RECTANGLE(1,1),
+			&SPA_RECTANGLE(info->max_texture_width,
+				       info->max_texture_height)),
+		0);
+	/* default/min/max framerate range */
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_framerate,
+		SPA_POD_CHOICE_RANGE_Fraction(
+			&SPA_FRACTION(25,1),
+			&SPA_FRACTION(0,1),
+			&SPA_FRACTION(30,1)),
+		0);
+	return spa_pod_builder_pop(b, &f[0]);
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. do stuff with buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void
+on_process(void *_data)
+{
+	struct data *data = _data;
+	struct pw_stream *stream = data->stream;
+	struct pw_buffer *b;
+	struct spa_buffer *buf;
+	void *sdata, *ddata;
+	int sstride, dstride, ostride;
+	uint32_t i;
+	uint8_t *src, *dst;
+
+	b = NULL;
+	/* dequeue and queue old buffers, use the last available
+	 * buffer */
+	while (true) {
+		struct pw_buffer *t;
+		if ((t = pw_stream_dequeue_buffer(stream)) == NULL)
+			break;
+		if (b)
+			pw_stream_queue_buffer(stream, b);
+		b = t;
+	}
+	if (b == NULL) {
+		pw_log_warn("out of buffers: %m");
+		return;
+	}
+
+	buf = b->buffer;
+
+	pw_log_info("new buffer %p", buf);
+
+	handle_events(data);
+
+	if (buf->datas[0].type == SPA_DATA_DmaBuf) {
+		// Simulate a failed import of a DmaBuf
+		// We should try another modifier
+		printf("Failed to import dmabuf, stripping modifier %"PRIu64"\n", data->format.info.raw.modifier);
+		strip_modifier(data, data->format.info.raw.format, data->format.info.raw.modifier);
+		/* wake the reneg event so formats are offered again without
+		 * the stripped modifier */
+		pw_loop_signal_event(pw_main_loop_get_loop(data->loop), data->reneg);
+		goto done;
+	}
+
+	if ((sdata = buf->datas[0].data) == NULL)
+		goto done;
+
+	if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+		fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+		goto done;
+	}
+
+	/* copy video image in texture */
+	sstride = buf->datas[0].chunk->stride;
+	if (sstride == 0)
+		sstride = buf->datas[0].chunk->size / data->size.height;
+	/* copy no more than either side's stride per row */
+	ostride = SPA_MIN(sstride, dstride);
+
+	src = sdata;
+	dst = ddata;
+
+	for (i = 0; i < data->size.height; i++) {
+		memcpy(dst, src, ostride);
+		src += sstride;
+		dst += dstride;
+	}
+	SDL_UnlockTexture(data->texture);
+
+	SDL_RenderClear(data->renderer);
+	/* now render the video */
+	SDL_RenderCopy(data->renderer, data->texture, NULL, NULL);
+	SDL_RenderPresent(data->renderer);
+
+      done:
+	/* always give the buffer back to the stream */
+	pw_stream_queue_buffer(stream, b);
+}
+
+static void on_stream_state_changed(void *_data, enum pw_stream_state old,
+				enum pw_stream_state state, const char *error)
+{
+	/* Report every state transition; an unconnected stream ends the
+	 * program (PAUSED/STREAMING need no action here). */
+	struct data *d = _data;
+
+	fprintf(stderr, "stream state: \"%s\"\n", pw_stream_state_as_string(state));
+	if (state == PW_STREAM_STATE_UNCONNECTED)
+		pw_main_loop_quit(d->loop);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ *
+ * We are now supposed to call pw_stream_finish_format() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_finish_format() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+/* Handle a Format param change: parse the negotiated raw video format,
+ * create the matching SDL texture and reply with our buffer requirements
+ * via pw_stream_update_params(). */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+	struct data *data = _data;
+	struct pw_stream *stream = data->stream;
+	uint8_t params_buffer[1024];
+	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+	const struct spa_pod *params[1];
+	Uint32 sdl_format;
+	void *d;
+
+	/* NULL means to clear the format */
+	if (param == NULL || id != SPA_PARAM_Format)
+		return;
+
+	fprintf(stderr, "got format:\n");
+	spa_debug_format(2, NULL, param);
+
+	if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
+		return;
+
+	if (data->format.media_type != SPA_MEDIA_TYPE_video ||
+	    data->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
+		return;
+
+	/* call a helper function to parse the format for us. */
+	spa_format_video_raw_parse(param, &data->format.info.raw);
+	sdl_format = id_to_sdl_format(data->format.info.raw.format);
+	data->size = data->format.info.raw.size;
+
+	if (sdl_format == SDL_PIXELFORMAT_UNKNOWN) {
+		pw_stream_set_error(stream, -EINVAL, "unknown pixel format");
+		return;
+	}
+	if (data->size.width == 0 || data->size.height == 0) {
+		pw_stream_set_error(stream, -EINVAL, "invalid size");
+		return;
+	}
+
+	data->texture = SDL_CreateTexture(data->renderer,
+					  sdl_format,
+					  SDL_TEXTUREACCESS_STREAMING,
+					  data->size.width,
+					  data->size.height);
+	/* texture creation can fail (renderer limits, out of memory);
+	 * previously the NULL texture was passed on to SDL_LockTexture
+	 * and data->stride stayed uninitialized */
+	if (data->texture == NULL) {
+		pw_stream_set_error(stream, -EINVAL, "can't create texture: %s",
+				SDL_GetError());
+		return;
+	}
+	/* query the stride SDL chose for this texture */
+	SDL_LockTexture(data->texture, NULL, &d, &data->stride);
+	SDL_UnlockTexture(data->texture);
+
+	/* a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size,
+	 * number, stride etc of the buffers */
+	params[0] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+		SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+		SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+		SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->size.height),
+		SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride),
+		SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<<SPA_DATA_MemPtr) | (1<<SPA_DATA_DmaBuf)));
+
+	/* we are done */
+	pw_stream_update_params(stream, params, 1);
+}
+
+/* these are the stream events we listen for */
+static const struct pw_stream_events stream_events = {
+	PW_VERSION_STREAM_EVENTS,
+	.state_changed = on_stream_state_changed,	/* quit when unconnected */
+	.param_changed = on_stream_param_changed,	/* create texture on Format */
+	.process = on_process,				/* render each frame */
+};
+
+static int build_formats(struct data *data, struct spa_pod_builder *b, const struct spa_pod **params)
+{
+	/* Collect the EnumFormat params to offer: RGB with the current
+	 * modifier list (while any remain) and plain RGB as a fallback.
+	 * Returns the number of params written; params must hold 2. */
+	SDL_RendererInfo info;
+	struct modifier_info *mi = &data->mod_info[0];
+	int count = 0;
+
+	SDL_GetRendererInfo(data->renderer, &info);
+
+	if (mi->n_modifiers > 0)
+		params[count++] = build_format(b, &info, SPA_VIDEO_FORMAT_RGB,
+				mi->modifiers, mi->n_modifiers);
+	params[count++] = build_format(b, &info, SPA_VIDEO_FORMAT_RGB, NULL, 0);
+
+	for (int i = 0; i < count; i++)
+		spa_debug_format(2, NULL, params[i]);
+
+	return count;
+}
+
+static void reneg_format(void *_data, uint64_t expiration)
+{
+	/* Event handler: re-offer the (possibly reduced) format list.
+	 * Skipped while no format has been negotiated yet. */
+	struct data *data = (struct data*) _data;
+	uint8_t pod_mem[1024];
+	struct spa_pod_builder builder = SPA_POD_BUILDER_INIT(pod_mem, sizeof(pod_mem));
+	const struct spa_pod *pods[2];
+	uint32_t count;
+
+	if (data->format.info.raw.format == 0)
+		return;
+
+	fprintf(stderr, "renegotiate formats:\n");
+	count = build_formats(data, &builder, pods);
+	pw_stream_update_params(data->stream, pods, count);
+}
+
+static void do_quit(void *userdata, int signal_number)
+{
+	/* SIGINT/SIGTERM handler: stop the main loop. */
+	struct data *d = userdata;
+	pw_main_loop_quit(d->loop);
+}
+
+int main(int argc, char *argv[])
+{
+	struct data data = { 0, };
+	const struct spa_pod *params[2];
+	uint8_t buffer[1024];
+	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+	struct pw_properties *props;
+	int res, n_params;
+
+	pw_init(&argc, &argv);
+
+	/* create a main loop */
+	data.loop = pw_main_loop_new(NULL);
+
+	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+	/* create a simple stream, the simple stream manages to core and remote
+	 * objects for you if you don't need to deal with them
+	 *
+	 * If you plan to autoconnect your stream, you need to provide at least
+	 * media, category and role properties
+	 *
+	 * Pass your events and a user_data pointer as the last arguments. This
+	 * will inform you about the stream state. The most important event
+	 * you need to listen to is the process event where you need to consume
+	 * the data provided to you.
+	 */
+	props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+			PW_KEY_MEDIA_CATEGORY, "Capture",
+			PW_KEY_MEDIA_ROLE, "Camera",
+			NULL);	/* was terminated with ',' — an accidental comma expression */
+	data.path = argc > 1 ? argv[1] : NULL;
+	if (data.path)
+		/* Set stream target if given on command line */
+		pw_properties_set(props, PW_KEY_TARGET_OBJECT, data.path);
+
+	data.stream = pw_stream_new_simple(
+			pw_main_loop_get_loop(data.loop),
+			"video-play-fixate",
+			props,
+			&stream_events,
+			&data);
+
+	if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+		fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	init_modifiers(&data);
+
+	if (SDL_CreateWindowAndRenderer
+	    (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+		fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	/* build the extra parameters to connect with. To connect, we can provide
+	 * a list of supported formats. We use a builder that writes the param
+	 * object to the stack. */
+	printf("supported formats:\n");
+	n_params = build_formats(&data, &b, params);
+
+	/* now connect the stream, we need a direction (input/output),
+	 * an optional target node to connect to, some flags and parameters
+	 */
+	if ((res = pw_stream_connect(data.stream,
+			  PW_DIRECTION_INPUT,
+			  PW_ID_ANY,
+			  PW_STREAM_FLAG_AUTOCONNECT |	/* try to automatically connect this stream */
+			  PW_STREAM_FLAG_MAP_BUFFERS,	/* mmap the buffer data for us */
+			  params, n_params))		/* extra parameters, see above */ < 0) {
+		fprintf(stderr, "can't connect: %s\n", spa_strerror(res));
+		return -1;
+	}
+
+	/* event used by on_process() to request renegotiation */
+	data.reneg = pw_loop_add_event(pw_main_loop_get_loop(data.loop), reneg_format, &data);
+
+	/* do things until we quit the mainloop */
+	pw_main_loop_run(data.loop);
+
+	pw_stream_destroy(data.stream);
+	pw_main_loop_destroy(data.loop);
+
+	destroy_modifiers(&data);
+
+	SDL_DestroyTexture(data.texture);
+	if (data.cursor)
+		SDL_DestroyTexture(data.cursor);
+	SDL_DestroyRenderer(data.renderer);
+	SDL_DestroyWindow(data.window);
+	pw_deinit();
+
+	return 0;
+}
diff --git a/src/examples/video-play-pull.c b/src/examples/video-play-pull.c
new file mode 100644
index 0000000..fd0e305
--- /dev/null
+++ b/src/examples/video-play-pull.c
@@ -0,0 +1,588 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video input stream using \ref pw_stream_trigger_process, for pull mode.
+ [title]
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+
+#define MAX_BUFFERS 64
+
+#include "sdl.h"
+
+/* One pixel of F32 RGBA video (used by the DSP media subtype path). */
+struct pixel {
+	float r, g, b, a;
+};
+
+/* All state of this example, passed to every callback as user data. */
+struct data {
+	const char *path;	/* optional target node, from argv[1] */
+
+	/* SDL output objects */
+	SDL_Renderer *renderer;
+	SDL_Window *window;
+	SDL_Texture *texture;
+	SDL_Texture *cursor;
+
+	struct pw_main_loop *loop;
+	struct spa_source *timer;	/* fallback pull timer */
+
+	struct pw_stream *stream;
+	struct spa_hook stream_listener;
+
+	struct spa_io_position *position;	/* io area set in io_changed */
+
+	struct spa_video_info format;	/* negotiated video format */
+	int32_t stride;			/* stride SDL chose for the texture */
+	struct spa_rectangle size;	/* negotiated video size */
+
+	int counter;
+	SDL_Rect rect;			/* crop rectangle from VideoCrop meta */
+	SDL_Rect cursor_rect;		/* position/size from Cursor meta */
+	bool is_yuv;
+	bool have_request_process;	/* driver sends RequestProcess itself */
+};
+
+static void handle_events(struct data *data)
+{
+	/* Drain all pending SDL events; a window-close request stops the
+	 * PipeWire main loop. */
+	SDL_Event ev;
+
+	while (SDL_PollEvent(&ev)) {
+		if (ev.type == SDL_QUIT)
+			pw_main_loop_quit(data->loop);
+	}
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. do stuff with buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void
+on_process(void *_data)
+{
+	struct data *data = _data;
+	struct pw_stream *stream = data->stream;
+	struct pw_buffer *b;
+	struct spa_buffer *buf;
+	void *sdata, *ddata;
+	int sstride, dstride, ostride;
+	struct spa_meta_region *mc;
+	struct spa_meta_cursor *mcs;
+	uint32_t i, j;
+	uint8_t *src, *dst;
+	bool render_cursor = false;
+
+	b = NULL;
+	/* dequeue everything that is queued and requeue all but the newest
+	 * buffer, so we always render the most recent frame */
+	while (true) {
+		struct pw_buffer *t;
+		if ((t = pw_stream_dequeue_buffer(stream)) == NULL)
+			break;
+		if (b)
+			pw_stream_queue_buffer(stream, b);
+		b = t;
+	}
+	if (b == NULL) {
+		pw_log_warn("out of buffers: %m");
+		return;
+	}
+
+	buf = b->buffer;
+
+	pw_log_trace("new buffer %p", buf);
+
+	handle_events(data);
+
+	if ((sdata = buf->datas[0].data) == NULL)
+		goto done;
+
+	/* get the videocrop metadata if any */
+	if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc))) &&
+	    spa_meta_region_is_valid(mc)) {
+		data->rect.x = mc->region.position.x;
+		data->rect.y = mc->region.position.y;
+		data->rect.w = mc->region.size.width;
+		data->rect.h = mc->region.size.height;
+	}
+	/* get cursor metadata */
+	if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs))) &&
+	    spa_meta_cursor_is_valid(mcs)) {
+		struct spa_meta_bitmap *mb;
+		void *cdata;
+		int cstride;
+
+		data->cursor_rect.x = mcs->position.x;
+		data->cursor_rect.y = mcs->position.y;
+
+		/* the cursor bitmap follows the meta at bitmap_offset */
+		mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
+		data->cursor_rect.w = mb->size.width;
+		data->cursor_rect.h = mb->size.height;
+
+		/* lazily create the cursor texture on first use */
+		if (data->cursor == NULL) {
+			data->cursor = SDL_CreateTexture(data->renderer,
+						 id_to_sdl_format(mb->format),
+						 SDL_TEXTUREACCESS_STREAMING,
+						 mb->size.width, mb->size.height);
+			SDL_SetTextureBlendMode(data->cursor, SDL_BLENDMODE_BLEND);
+		}
+
+
+		if (SDL_LockTexture(data->cursor, NULL, &cdata, &cstride) < 0) {
+			fprintf(stderr, "Couldn't lock cursor texture: %s\n", SDL_GetError());
+			goto done;
+		}
+
+		/* copy the cursor bitmap into the texture */
+		src = SPA_PTROFF(mb, mb->offset, uint8_t);
+		dst = cdata;
+		ostride = SPA_MIN(cstride, mb->stride);
+
+		for (i = 0; i < mb->size.height; i++) {
+			memcpy(dst, src, ostride);
+			dst += cstride;
+			src += mb->stride;
+		}
+		SDL_UnlockTexture(data->cursor);
+
+		render_cursor = true;
+	}
+
+	/* copy video image in texture */
+	if (data->is_yuv) {
+		/* planar YUV: Y plane, then quarter-size U and V planes */
+		sstride = data->stride;
+		SDL_UpdateYUVTexture(data->texture,
+				NULL,
+				sdata,
+				sstride,
+				SPA_PTROFF(sdata, sstride * data->size.height, void),
+				sstride / 2,
+				SPA_PTROFF(sdata, 5 * (sstride * data->size.height) / 4, void),
+				sstride / 2);
+	}
+	else {
+		if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+			fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+			goto done;
+		}
+
+		sstride = buf->datas[0].chunk->stride;
+		if (sstride == 0)
+			sstride = buf->datas[0].chunk->size / data->size.height;
+		ostride = SPA_MIN(sstride, dstride);
+
+		src = sdata;
+		dst = ddata;
+
+		if (data->format.media_subtype == SPA_MEDIA_SUBTYPE_dsp) {
+			/* DSP frames are F32 RGBA; convert to 8-bit, clamped */
+			for (i = 0; i < data->size.height; i++) {
+				struct pixel *p = (struct pixel *) src;
+				for (j = 0; j < data->size.width; j++) {
+					dst[j * 4 + 0] = SPA_CLAMP(p[j].r * 255.0f, 0, 255);
+					dst[j * 4 + 1] = SPA_CLAMP(p[j].g * 255.0f, 0, 255);
+					dst[j * 4 + 2] = SPA_CLAMP(p[j].b * 255.0f, 0, 255);
+					dst[j * 4 + 3] = SPA_CLAMP(p[j].a * 255.0f, 0, 255);
+				}
+				src += sstride;
+				dst += dstride;
+			}
+		} else {
+			/* raw frames: plain row-by-row copy */
+			for (i = 0; i < data->size.height; i++) {
+				memcpy(dst, src, ostride);
+				src += sstride;
+				dst += dstride;
+			}
+		}
+		SDL_UnlockTexture(data->texture);
+	}
+
+	SDL_RenderClear(data->renderer);
+	/* now render the video and then the cursor if any */
+	SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL);
+	if (render_cursor) {
+		SDL_RenderCopy(data->renderer, data->cursor, NULL, &data->cursor_rect);
+	}
+	SDL_RenderPresent(data->renderer);
+
+      done:
+	/* always give the buffer back to the stream */
+	pw_stream_queue_buffer(stream, b);
+}
+
+static void enable_timeouts(struct data *data, bool enabled)
+{
+	/* Arm (or disarm) the fallback pull timer: first fire almost
+	 * immediately, then every 80ms. The timer is not used once the
+	 * driver sends RequestProcess commands itself. */
+	struct timespec timeout, interval, *to = NULL, *iv = NULL;
+
+	if (enabled && !data->have_request_process) {
+		timeout.tv_sec = 0;
+		timeout.tv_nsec = 1;
+		interval.tv_sec = 0;
+		interval.tv_nsec = 80 * SPA_NSEC_PER_MSEC;
+		to = &timeout;
+		iv = &interval;
+	}
+	pw_loop_update_timer(pw_main_loop_get_loop(data->loop),
+			data->timer, to, iv, false);
+}
+
+
+static void on_stream_state_changed(void *_data, enum pw_stream_state old,
+				enum pw_stream_state state, const char *error)
+{
+	/* Report every transition and keep the pull timer in sync with the
+	 * stream state: running only while streaming. */
+	struct data *d = _data;
+
+	fprintf(stderr, "stream state: \"%s\"\n", pw_stream_state_as_string(state));
+	if (state == PW_STREAM_STATE_UNCONNECTED)
+		pw_main_loop_quit(d->loop);
+	else if (state == PW_STREAM_STATE_PAUSED)
+		enable_timeouts(d, false);
+	else if (state == PW_STREAM_STATE_STREAMING)
+		enable_timeouts(d, true);
+}
+
+static void
+on_stream_io_changed(void *_data, uint32_t id, void *area, uint32_t size)
+{
+	/* Remember the SPA_IO_Position area; other io areas are ignored. */
+	struct data *d = _data;
+
+	if (id == SPA_IO_Position)
+		d->position = area;
+}
+
+static void
+on_trigger_done(void *_data)
+{
+	/* A triggered graph cycle finished; just trace it. */
+	struct data *d = _data;
+	pw_log_trace("%p trigger done", d);
+}
+
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+	/* Fallback pull timer fired: trigger a graph cycle, unless the
+	 * driver requests processing itself. */
+	struct data *d = userdata;
+
+	if (d->have_request_process)
+		return;
+	pw_stream_trigger_process(d->stream);
+}
+
+static void
+on_command(void *_data, const struct spa_command *command)
+{
+	/* The driver asks us to pull: remember that, switch off the fallback
+	 * timer and trigger a cycle right away. */
+	struct data *d = _data;
+
+	if (SPA_NODE_COMMAND_ID(command) != SPA_NODE_COMMAND_RequestProcess)
+		return;
+	d->have_request_process = true;
+	enable_timeouts(d, false);
+	pw_stream_trigger_process(d->stream);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ *
+ * We are now supposed to call pw_stream_finish_format() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_finish_format() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+ struct data *data = _data;
+ struct pw_stream *stream = data->stream;
+ uint8_t params_buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ const struct spa_pod *params[5];
+ Uint32 sdl_format;
+ void *d;
+ int32_t mult, size;
+
+ /* NULL means to clear the format */
+ if (param == NULL || id != SPA_PARAM_Format)
+ return;
+
+ fprintf(stderr, "got format:\n");
+ spa_debug_format(2, NULL, param);
+
+ if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
+ return;
+
+ if (data->format.media_type != SPA_MEDIA_TYPE_video)
+ return;
+
+ switch (data->format.media_subtype) {
+ case SPA_MEDIA_SUBTYPE_raw:
+ /* call a helper function to parse the format for us. */
+ spa_format_video_raw_parse(param, &data->format.info.raw);
+ sdl_format = id_to_sdl_format(data->format.info.raw.format);
+ data->size = SPA_RECTANGLE(data->format.info.raw.size.width,
+ data->format.info.raw.size.height);
+ mult = 1;
+ break;
+ case SPA_MEDIA_SUBTYPE_dsp:
+ spa_format_video_dsp_parse(param, &data->format.info.dsp);
+ if (data->format.info.dsp.format != SPA_VIDEO_FORMAT_DSP_F32)
+ return;
+ /* the DSP video size comes from the Position io area; fail
+ * cleanly instead of dereferencing NULL when it was not
+ * (yet) delivered via io_changed */
+ if (data->position == NULL) {
+ pw_stream_set_error(stream, -EINVAL, "no position information");
+ return;
+ }
+ sdl_format = SDL_PIXELFORMAT_RGBA32;
+ data->size = SPA_RECTANGLE(data->position->video.size.width,
+ data->position->video.size.height);
+ /* 4 floats per pixel */
+ mult = 4;
+ break;
+ default:
+ sdl_format = SDL_PIXELFORMAT_UNKNOWN;
+ break;
+ }
+
+ if (sdl_format == SDL_PIXELFORMAT_UNKNOWN) {
+ pw_stream_set_error(stream, -EINVAL, "unknown pixel format");
+ return;
+ }
+ if (data->size.width == 0 || data->size.height == 0) {
+ pw_stream_set_error(stream, -EINVAL, "invalid size");
+ return;
+ }
+
+ data->texture = SDL_CreateTexture(data->renderer,
+ sdl_format,
+ SDL_TEXTUREACCESS_STREAMING,
+ data->size.width,
+ data->size.height);
+ /* lock once only to learn the pitch SDL chose for this texture */
+ SDL_LockTexture(data->texture, NULL, &d, &data->stride);
+ SDL_UnlockTexture(data->texture);
+
+ switch(sdl_format) {
+ case SDL_PIXELFORMAT_YV12:
+ case SDL_PIXELFORMAT_IYUV:
+ /* planar YUV 4:2:0: one full luma plane + two quarter chroma planes */
+ size = (data->stride * data->size.height) * 3 / 2;
+ data->is_yuv = true;
+ break;
+ default:
+ size = data->stride * data->size.height;
+ break;
+ }
+
+ data->rect.x = 0;
+ data->rect.y = 0;
+ data->rect.w = data->size.width;
+ data->rect.h = data->size.height;
+
+ /* a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size,
+ * number, stride etc of the buffers */
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(size * mult),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride * mult),
+ SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<<SPA_DATA_MemPtr)));
+
+ /* a header metadata with timing information */
+ params[1] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
+ SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
+ /* video cropping information */
+ params[2] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
+ SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
+#define CURSOR_META_SIZE(w,h) (sizeof(struct spa_meta_cursor) + \
+ sizeof(struct spa_meta_bitmap) + w * h * 4)
+ /* cursor information */
+ params[3] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
+ SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
+ CURSOR_META_SIZE(64,64),
+ CURSOR_META_SIZE(1,1),
+ CURSOR_META_SIZE(256,256)));
+
+ /* we are done */
+ pw_stream_update_params(stream, params, 4);
+}
+
+/* these are the stream events we listen for */
+static const struct pw_stream_events stream_events = {
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_stream_state_changed,
+ .io_changed = on_stream_io_changed,
+ .param_changed = on_stream_param_changed,
+ .process = on_process,
+ /* driver-side callbacks: we pull data with pw_stream_trigger_process() */
+ .trigger_done = on_trigger_done,
+ .command = on_command,
+};
+
+/* Build the EnumFormat params offered at connect time: all raw formats the
+ * SDL renderer supports, plus the DSP F32 video format. Returns the number
+ * of params written into the array. */
+static int build_format(struct data *data, struct spa_pod_builder *b, const struct spa_pod **params)
+{
+ SDL_RendererInfo info;
+
+ SDL_GetRendererInfo(data->renderer, &info);
+ params[0] = sdl_build_formats(&info, b);
+
+ fprintf(stderr, "supported SDL formats:\n");
+ spa_debug_format(2, NULL, params[0]);
+
+ params[1] = spa_pod_builder_add_object(b,
+ SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+ SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
+ SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_dsp),
+ SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_DSP_F32));
+
+ fprintf(stderr, "supported DSP formats:\n");
+ spa_debug_format(2, NULL, params[1]);
+
+ return 2;
+}
+
+/* SIGINT/SIGTERM handler: stop the main loop so main() can clean up */
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_main_loop_quit(data->loop);
+}
+
+int main(int argc, char *argv[])
+{
+ struct data data = { 0, };
+ const struct spa_pod *params[2];
+ uint8_t buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+ struct pw_properties *props;
+ int res, n_params;
+
+ pw_init(&argc, &argv);
+
+ /* create a main loop */
+ data.loop = pw_main_loop_new(NULL);
+
+ pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+ pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+ /* the timer to pull in data */
+ data.timer = pw_loop_add_timer(pw_main_loop_get_loop(data.loop), on_timeout, &data);
+
+ /* create a simple stream, the simple stream manages to core and remote
+ * objects for you if you don't need to deal with them
+ *
+ * If you plan to autoconnect your stream, you need to provide at least
+ * media, category and role properties
+ *
+ * Pass your events and a user_data pointer as the last arguments. This
+ * will inform you about the stream state. The most important event
+ * you need to listen to is the process event where you need to consume
+ * the data provided to you.
+ */
+ props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+ PW_KEY_MEDIA_CATEGORY, "Capture",
+ PW_KEY_MEDIA_ROLE, "Camera",
+ PW_KEY_PRIORITY_DRIVER, "10000",
+ NULL);
+ data.path = argc > 1 ? argv[1] : NULL;
+ if (data.path)
+ /* Set stream target if given on command line */
+ pw_properties_set(props, PW_KEY_TARGET_OBJECT, data.path);
+
+ data.stream = pw_stream_new_simple(
+ pw_main_loop_get_loop(data.loop),
+ "video-play",
+ props,
+ &stream_events,
+ &data);
+
+ if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+ fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ if (SDL_CreateWindowAndRenderer
+ (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+ fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ /* build the extra parameters to connect with. To connect, we can provide
+ * a list of supported formats. We use a builder that writes the param
+ * object to the stack. */
+ n_params = build_format(&data, &b, params);
+
+ /* now connect the stream, we need a direction (input/output),
+ * an optional target node to connect to, some flags and parameters
+ */
+ if ((res = pw_stream_connect(data.stream,
+ PW_DIRECTION_INPUT,
+ PW_ID_ANY,
+ PW_STREAM_FLAG_DRIVER | /* we're driver, we pull */
+ PW_STREAM_FLAG_AUTOCONNECT | /* try to automatically connect this stream */
+ PW_STREAM_FLAG_MAP_BUFFERS, /* mmap the buffer data for us */
+ params, n_params)) /* extra parameters, see above */ < 0) {
+ fprintf(stderr, "can't connect: %s\n", spa_strerror(res));
+ return -1;
+ }
+
+ /* do things until we quit the mainloop */
+ pw_main_loop_run(data.loop);
+
+ pw_stream_destroy(data.stream);
+ pw_main_loop_destroy(data.loop);
+
+ SDL_DestroyTexture(data.texture);
+ if (data.cursor)
+ SDL_DestroyTexture(data.cursor);
+ SDL_DestroyRenderer(data.renderer);
+ SDL_DestroyWindow(data.window);
+ pw_deinit();
+
+ return 0;
+}
diff --git a/src/examples/video-play-reneg.c b/src/examples/video-play-reneg.c
new file mode 100644
index 0000000..26b19cb
--- /dev/null
+++ b/src/examples/video-play-reneg.c
@@ -0,0 +1,438 @@
+/* PipeWire
+ *
+ * Copyright © 2020 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video input stream using \ref pw_stream "pw_stream", with format renegotiation.
+ [title]
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+
+#define MAX_BUFFERS 64
+
+#include "sdl.h"
+
+/* one RGBA pixel as 4 floats (the DSP F32 layout).
+ * NOTE(review): appears unused in this example — kept for parity with the
+ * other video examples; confirm before removing. */
+struct pixel {
+ float r, g, b, a;
+};
+
+/* per-instance state shared between main() and the stream callbacks */
+struct data {
+ const char *path; /* optional target object from argv[1] */
+
+ /* SDL output objects */
+ SDL_Renderer *renderer;
+ SDL_Window *window;
+ SDL_Texture *texture;
+ SDL_Texture *cursor;
+
+ struct pw_main_loop *loop;
+ struct spa_source *timer; /* drives periodic renegotiation */
+
+ struct pw_stream *stream;
+ struct spa_hook stream_listener;
+
+ /* negotiated video format and derived texture layout */
+ struct spa_video_info format;
+ int32_t stride;
+ struct spa_rectangle size;
+
+ int counter; /* parity selects the next renegotiated size */
+};
+
+/* Drain the pending SDL events; a window close request stops the main loop. */
+static void handle_events(struct data *data)
+{
+ SDL_Event ev;
+
+ while (SDL_PollEvent(&ev)) {
+ if (ev.type == SDL_QUIT)
+ pw_main_loop_quit(data->loop);
+ }
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. do stuff with buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void
+on_process(void *_data)
+{
+ struct data *data = _data;
+ struct pw_stream *stream = data->stream;
+ struct pw_buffer *b;
+ struct spa_buffer *buf;
+ void *sdata, *ddata;
+ int sstride, dstride, ostride;
+ uint32_t i;
+ uint8_t *src, *dst;
+
+ b = NULL;
+ /* dequeue and queue old buffers, use the last available
+ * buffer */
+ while (true) {
+ struct pw_buffer *t;
+ if ((t = pw_stream_dequeue_buffer(stream)) == NULL)
+ break;
+ if (b)
+ pw_stream_queue_buffer(stream, b);
+ b = t;
+ }
+ if (b == NULL) {
+ pw_log_warn("out of buffers: %m");
+ return;
+ }
+
+ buf = b->buffer;
+
+ pw_log_info("new buffer %p", buf);
+
+ handle_events(data);
+
+ /* buffer may have no mapped data (e.g. not MemPtr); just recycle it */
+ if ((sdata = buf->datas[0].data) == NULL)
+ goto done;
+
+ if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+ fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+ goto done;
+ }
+
+ /* copy video image in texture */
+ sstride = buf->datas[0].chunk->stride;
+ if (sstride == 0)
+ sstride = buf->datas[0].chunk->size / data->size.height;
+ /* copy only as many bytes per row as both strides allow */
+ ostride = SPA_MIN(sstride, dstride);
+
+ src = sdata;
+ dst = ddata;
+
+ for (i = 0; i < data->size.height; i++) {
+ memcpy(dst, src, ostride);
+ src += sstride;
+ dst += dstride;
+ }
+ SDL_UnlockTexture(data->texture);
+
+ SDL_RenderClear(data->renderer);
+ /* now render the video */
+ SDL_RenderCopy(data->renderer, data->texture, NULL, NULL);
+ SDL_RenderPresent(data->renderer);
+
+ done:
+ pw_stream_queue_buffer(stream, b);
+}
+
+/* Stream state handler: quit on disconnect; run a 1 second repeating timer
+ * (used to trigger renegotiation) only while streaming. */
+static void on_stream_state_changed(void *_data, enum pw_stream_state old,
+ enum pw_stream_state state, const char *error)
+{
+ struct data *data = _data;
+ fprintf(stderr, "stream state: \"%s\"\n", pw_stream_state_as_string(state));
+ switch (state) {
+ case PW_STREAM_STATE_UNCONNECTED:
+ pw_main_loop_quit(data->loop);
+ break;
+ case PW_STREAM_STATE_PAUSED:
+ /* NULL timespecs disarm the timer */
+ pw_loop_update_timer(pw_main_loop_get_loop(data->loop),
+ data->timer, NULL, NULL, false);
+ break;
+ case PW_STREAM_STATE_STREAMING:
+ {
+ struct timespec timeout, interval;
+
+ timeout.tv_sec = 1;
+ timeout.tv_nsec = 0;
+ interval.tv_sec = 1;
+ interval.tv_nsec = 0;
+
+ pw_loop_update_timer(pw_main_loop_get_loop(data->loop),
+ data->timer, &timeout, &interval, false);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ *
+ * We are now supposed to call pw_stream_finish_format() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_finish_format() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+ struct data *data = _data;
+ struct pw_stream *stream = data->stream;
+ uint8_t params_buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ const struct spa_pod *params[1];
+ Uint32 sdl_format;
+ void *d;
+
+ /* NULL means to clear the format */
+ if (param == NULL || id != SPA_PARAM_Format)
+ return;
+
+ fprintf(stderr, "got format:\n");
+ spa_debug_format(2, NULL, param);
+
+ if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
+ return;
+
+ /* this example only handles raw video */
+ if (data->format.media_type != SPA_MEDIA_TYPE_video ||
+ data->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
+ return;
+
+ /* call a helper function to parse the format for us. */
+ spa_format_video_raw_parse(param, &data->format.info.raw);
+ sdl_format = id_to_sdl_format(data->format.info.raw.format);
+ data->size = data->format.info.raw.size;
+
+ if (sdl_format == SDL_PIXELFORMAT_UNKNOWN) {
+ pw_stream_set_error(stream, -EINVAL, "unknown pixel format");
+ return;
+ }
+ if (data->size.width == 0 || data->size.height == 0) {
+ pw_stream_set_error(stream, -EINVAL, "invalid size");
+ return;
+ }
+
+ data->texture = SDL_CreateTexture(data->renderer,
+ sdl_format,
+ SDL_TEXTUREACCESS_STREAMING,
+ data->size.width,
+ data->size.height);
+ /* lock once only to learn the pitch SDL chose for this texture */
+ SDL_LockTexture(data->texture, NULL, &d, &data->stride);
+ SDL_UnlockTexture(data->texture);
+
+ /* a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size,
+ * number, stride etc of the buffers */
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->size.height),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride),
+ SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<<SPA_DATA_MemPtr)));
+
+ /* we are done */
+ pw_stream_update_params(stream, params, 1);
+}
+
+/* these are the stream events we listen for */
+static const struct pw_stream_events stream_events = {
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_stream_state_changed,
+ .param_changed = on_stream_param_changed,
+ .process = on_process,
+};
+
+/* Build the EnumFormat params offered at connect time: all raw formats the
+ * SDL renderer supports. Returns the number of params written. */
+static int build_format(struct data *data, struct spa_pod_builder *b, const struct spa_pod **params)
+{
+ SDL_RendererInfo info;
+
+ SDL_GetRendererInfo(data->renderer, &info);
+ params[0] = sdl_build_formats(&info, b);
+
+ fprintf(stderr, "supported SDL formats:\n");
+ spa_debug_format(2, NULL, params[0]);
+
+ return 1;
+}
+
+/* Renegotiate the stream format, alternating between 640x480 and 320x240
+ * on each call (counter parity). Returns -EBUSY while no format has been
+ * negotiated yet, 0 otherwise. */
+static int reneg_format(struct data *data)
+{
+ uint8_t buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+ const struct spa_pod *params[2];
+ int32_t width, height;
+
+ /* format 0 means on_stream_param_changed never parsed a format */
+ if (data->format.info.raw.format == 0)
+ return -EBUSY;
+
+ width = data->counter & 1 ? 320 : 640;
+ height = data->counter & 1 ? 240 : 480;
+
+ fprintf(stderr, "renegotiate to %dx%d:\n", width, height);
+ /* keep the negotiated pixel format and framerate, change only the size */
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+ SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
+ SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+ SPA_FORMAT_VIDEO_format, SPA_POD_Id(data->format.info.raw.format),
+ SPA_FORMAT_VIDEO_size, SPA_POD_Rectangle(&SPA_RECTANGLE(width, height)),
+ SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&data->format.info.raw.framerate));
+
+ pw_stream_update_params(data->stream, params, 1);
+
+ data->counter++;
+ return 0;
+}
+
+/* Renegotiate only the buffer layout (count/size/stride), keeping the
+ * current format. Alternative to reneg_format(), selected in on_timeout(). */
+static int reneg_buffers(struct data *data)
+{
+ uint8_t buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+ const struct spa_pod *params[2];
+
+ fprintf(stderr, "renegotiate buffers\n");
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->size.height),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride));
+
+ pw_stream_update_params(data->stream, params, 1);
+
+ data->counter++;
+ return 0;
+}
+
+/* Periodic timer: trigger one of the two renegotiation styles.
+ * The `if (1)` is a compile-time toggle; change it to exercise
+ * reneg_buffers() instead of reneg_format(). */
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+ struct data *data = userdata;
+ if (1)
+ reneg_format(data);
+ else
+ reneg_buffers(data);
+}
+
+/* SIGINT/SIGTERM handler: stop the main loop so main() can clean up */
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_main_loop_quit(data->loop);
+}
+
+int main(int argc, char *argv[])
+{
+ struct data data = { 0, };
+ const struct spa_pod *params[2];
+ uint8_t buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+ struct pw_properties *props;
+ int res, n_params;
+
+ pw_init(&argc, &argv);
+
+ /* create a main loop */
+ data.loop = pw_main_loop_new(NULL);
+
+ pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+ pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+ /* create a simple stream, the simple stream manages to core and remote
+ * objects for you if you don't need to deal with them
+ *
+ * If you plan to autoconnect your stream, you need to provide at least
+ * media, category and role properties
+ *
+ * Pass your events and a user_data pointer as the last arguments. This
+ * will inform you about the stream state. The most important event
+ * you need to listen to is the process event where you need to consume
+ * the data provided to you.
+ */
+ props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+ PW_KEY_MEDIA_CATEGORY, "Capture",
+ PW_KEY_MEDIA_ROLE, "Camera",
+ NULL);
+ data.path = argc > 1 ? argv[1] : NULL;
+ if (data.path)
+ /* Set stream target if given on command line */
+ pw_properties_set(props, PW_KEY_TARGET_OBJECT, data.path);
+
+ data.stream = pw_stream_new_simple(
+ pw_main_loop_get_loop(data.loop),
+ "video-play-reneg",
+ props,
+ &stream_events,
+ &data);
+
+ if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+ fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ if (SDL_CreateWindowAndRenderer
+ (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+ fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+ return -1;
+ }
+
+ /* build the extra parameters to connect with. To connect, we can provide
+ * a list of supported formats. We use a builder that writes the param
+ * object to the stack. */
+ n_params = build_format(&data, &b, params);
+
+ /* now connect the stream, we need a direction (input/output),
+ * an optional target node to connect to, some flags and parameters
+ */
+ if ((res = pw_stream_connect(data.stream,
+ PW_DIRECTION_INPUT,
+ PW_ID_ANY,
+ PW_STREAM_FLAG_AUTOCONNECT | /* try to automatically connect this stream */
+ PW_STREAM_FLAG_MAP_BUFFERS, /* mmap the buffer data for us */
+ params, n_params)) /* extra parameters, see above */ < 0) {
+ fprintf(stderr, "can't connect: %s\n", spa_strerror(res));
+ return -1;
+ }
+
+ /* the timer that periodically renegotiates, armed in state_changed */
+ data.timer = pw_loop_add_timer(pw_main_loop_get_loop(data.loop), on_timeout, &data);
+
+ /* do things until we quit the mainloop */
+ pw_main_loop_run(data.loop);
+
+ pw_stream_destroy(data.stream);
+ pw_main_loop_destroy(data.loop);
+
+ SDL_DestroyTexture(data.texture);
+ if (data.cursor)
+ SDL_DestroyTexture(data.cursor);
+ SDL_DestroyRenderer(data.renderer);
+ SDL_DestroyWindow(data.window);
+ pw_deinit();
+
+ return 0;
+}
diff --git a/src/examples/video-play.c b/src/examples/video-play.c
new file mode 100644
index 0000000..9cbbab6
--- /dev/null
+++ b/src/examples/video-play.c
@@ -0,0 +1,529 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video input stream using \ref pw_stream "pw_stream".
+ [title]
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/props.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH 640
+#define HEIGHT 480
+
+#define MAX_BUFFERS 64
+
+#include "sdl.h"
+
+/* one RGBA pixel as 4 floats, matching the DSP F32 video layout that the
+ * dsp branch of on_process() converts to bytes */
+struct pixel {
+ float r, g, b, a;
+};
+
+/* per-instance state shared between main() and the stream callbacks */
+struct data {
+ const char *path; /* optional target object from argv[1] */
+
+ /* SDL output objects */
+ SDL_Renderer *renderer;
+ SDL_Window *window;
+ SDL_Texture *texture;
+ SDL_Texture *cursor; /* created lazily from cursor metadata */
+
+ struct pw_main_loop *loop;
+
+ struct pw_stream *stream;
+ struct spa_hook stream_listener;
+
+ struct spa_io_position *position; /* sizes the video in the dsp case */
+
+ /* negotiated video format and derived texture layout */
+ struct spa_video_info format;
+ int32_t stride;
+ struct spa_rectangle size;
+
+ int counter;
+ SDL_Rect rect; /* crop rectangle, updated from VideoCrop meta */
+ SDL_Rect cursor_rect;
+ bool is_yuv; /* negotiated a planar YUV SDL format */
+};
+
+/* drain pending SDL events; a window close request stops the main loop */
+static void handle_events(struct data *data)
+{
+ SDL_Event event;
+ while (SDL_PollEvent(&event)) {
+ switch (event.type) {
+ case SDL_QUIT:
+ pw_main_loop_quit(data->loop);
+ break;
+ }
+ }
+}
+
+/* our data processing function is in general:
+ *
+ * struct pw_buffer *b;
+ * b = pw_stream_dequeue_buffer(stream);
+ *
+ * .. do stuff with buffer ...
+ *
+ * pw_stream_queue_buffer(stream, b);
+ */
+static void
+on_process(void *_data)
+{
+ struct data *data = _data;
+ struct pw_stream *stream = data->stream;
+ struct pw_buffer *b;
+ struct spa_buffer *buf;
+ void *sdata, *ddata;
+ int sstride, dstride, ostride;
+ struct spa_meta_region *mc;
+ struct spa_meta_cursor *mcs;
+ uint32_t i, j;
+ uint8_t *src, *dst;
+ bool render_cursor = false;
+
+ /* drain the queue, keep only the newest buffer and requeue the rest */
+ b = NULL;
+ while (true) {
+ struct pw_buffer *t;
+ if ((t = pw_stream_dequeue_buffer(stream)) == NULL)
+ break;
+ if (b)
+ pw_stream_queue_buffer(stream, b);
+ b = t;
+ }
+ if (b == NULL) {
+ pw_log_warn("out of buffers: %m");
+ return;
+ }
+
+ buf = b->buffer;
+
+ pw_log_trace("new buffer %p", buf);
+
+ handle_events(data);
+
+ /* buffer may have no mapped data; just recycle it */
+ if ((sdata = buf->datas[0].data) == NULL)
+ goto done;
+
+ /* get the videocrop metadata if any */
+ if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc))) &&
+ spa_meta_region_is_valid(mc)) {
+ data->rect.x = mc->region.position.x;
+ data->rect.y = mc->region.position.y;
+ data->rect.w = mc->region.size.width;
+ data->rect.h = mc->region.size.height;
+ }
+ /* get cursor metadata */
+ if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs))) &&
+ spa_meta_cursor_is_valid(mcs)) {
+ struct spa_meta_bitmap *mb;
+ void *cdata;
+ int cstride;
+
+ data->cursor_rect.x = mcs->position.x;
+ data->cursor_rect.y = mcs->position.y;
+
+ mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
+ data->cursor_rect.w = mb->size.width;
+ data->cursor_rect.h = mb->size.height;
+
+ /* lazily create the cursor texture from the first bitmap seen */
+ if (data->cursor == NULL) {
+ data->cursor = SDL_CreateTexture(data->renderer,
+ id_to_sdl_format(mb->format),
+ SDL_TEXTUREACCESS_STREAMING,
+ mb->size.width, mb->size.height);
+ SDL_SetTextureBlendMode(data->cursor, SDL_BLENDMODE_BLEND);
+ }
+
+
+ if (SDL_LockTexture(data->cursor, NULL, &cdata, &cstride) < 0) {
+ fprintf(stderr, "Couldn't lock cursor texture: %s\n", SDL_GetError());
+ goto done;
+ }
+
+ /* copy the cursor bitmap into the texture */
+ src = SPA_PTROFF(mb, mb->offset, uint8_t);
+ dst = cdata;
+ ostride = SPA_MIN(cstride, mb->stride);
+
+ for (i = 0; i < mb->size.height; i++) {
+ memcpy(dst, src, ostride);
+ dst += cstride;
+ src += mb->stride;
+ }
+ SDL_UnlockTexture(data->cursor);
+
+ render_cursor = true;
+ }
+
+ /* copy video image in texture */
+ if (data->is_yuv) {
+ /* planar 4:2:0: chroma planes follow the luma plane at half pitch */
+ sstride = data->stride;
+ SDL_UpdateYUVTexture(data->texture,
+ NULL,
+ sdata,
+ sstride,
+ SPA_PTROFF(sdata, sstride * data->size.height, void),
+ sstride / 2,
+ SPA_PTROFF(sdata, 5 * (sstride * data->size.height) / 4, void),
+ sstride / 2);
+ }
+ else {
+ if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+ fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+ goto done;
+ }
+
+ sstride = buf->datas[0].chunk->stride;
+ if (sstride == 0)
+ sstride = buf->datas[0].chunk->size / data->size.height;
+ ostride = SPA_MIN(sstride, dstride);
+
+ src = sdata;
+ dst = ddata;
+
+ if (data->format.media_subtype == SPA_MEDIA_SUBTYPE_dsp) {
+ /* convert float RGBA to 8-bit RGBA, clamped to [0,255] */
+ for (i = 0; i < data->size.height; i++) {
+ struct pixel *p = (struct pixel *) src;
+ for (j = 0; j < data->size.width; j++) {
+ dst[j * 4 + 0] = SPA_CLAMP(p[j].r * 255.0f, 0, 255);
+ dst[j * 4 + 1] = SPA_CLAMP(p[j].g * 255.0f, 0, 255);
+ dst[j * 4 + 2] = SPA_CLAMP(p[j].b * 255.0f, 0, 255);
+ dst[j * 4 + 3] = SPA_CLAMP(p[j].a * 255.0f, 0, 255);
+ }
+ src += sstride;
+ dst += dstride;
+ }
+ } else {
+ for (i = 0; i < data->size.height; i++) {
+ memcpy(dst, src, ostride);
+ src += sstride;
+ dst += dstride;
+ }
+ }
+ SDL_UnlockTexture(data->texture);
+ }
+
+ SDL_RenderClear(data->renderer);
+ /* now render the video and then the cursor if any */
+ SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL);
+ if (render_cursor) {
+ SDL_RenderCopy(data->renderer, data->cursor, NULL, &data->cursor_rect);
+ }
+ SDL_RenderPresent(data->renderer);
+
+ done:
+ pw_stream_queue_buffer(stream, b);
+}
+
+/* Stream state handler: quit on disconnect; activate the stream once it
+ * reaches PAUSED (it was connected inactive). */
+static void on_stream_state_changed(void *_data, enum pw_stream_state old,
+ enum pw_stream_state state, const char *error)
+{
+ struct data *data = _data;
+ fprintf(stderr, "stream state: \"%s\"\n", pw_stream_state_as_string(state));
+ switch (state) {
+ case PW_STREAM_STATE_UNCONNECTED:
+ pw_main_loop_quit(data->loop);
+ break;
+ case PW_STREAM_STATE_PAUSED:
+ /* because we started inactive, activate ourselves now */
+ pw_stream_set_active(data->stream, true);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Remember the io areas the stream gives us. The Position area is later
+ * used to size the video for the DSP format. */
+static void
+on_stream_io_changed(void *_data, uint32_t id, void *area, uint32_t size)
+{
+ struct data *data = _data;
+
+ switch (id) {
+ case SPA_IO_Position:
+ data->position = area;
+ break;
+ }
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ *
+ * We are now supposed to call pw_stream_finish_format() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_finish_format() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+ struct data *data = _data;
+ struct pw_stream *stream = data->stream;
+ uint8_t params_buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ const struct spa_pod *params[5];
+ Uint32 sdl_format;
+ void *d;
+ int32_t mult, size;
+
+ /* NULL means to clear the format */
+ if (param == NULL || id != SPA_PARAM_Format)
+ return;
+
+ fprintf(stderr, "got format:\n");
+ spa_debug_format(2, NULL, param);
+
+ if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
+ return;
+
+ if (data->format.media_type != SPA_MEDIA_TYPE_video)
+ return;
+
+ switch (data->format.media_subtype) {
+ case SPA_MEDIA_SUBTYPE_raw:
+ /* call a helper function to parse the format for us. */
+ spa_format_video_raw_parse(param, &data->format.info.raw);
+ sdl_format = id_to_sdl_format(data->format.info.raw.format);
+ data->size = SPA_RECTANGLE(data->format.info.raw.size.width,
+ data->format.info.raw.size.height);
+ mult = 1;
+ break;
+ case SPA_MEDIA_SUBTYPE_dsp:
+ spa_format_video_dsp_parse(param, &data->format.info.dsp);
+ if (data->format.info.dsp.format != SPA_VIDEO_FORMAT_DSP_F32)
+ return;
+ /* the DSP video size comes from the Position io area; fail
+ * cleanly instead of dereferencing NULL when it was not
+ * (yet) delivered via io_changed */
+ if (data->position == NULL) {
+ pw_stream_set_error(stream, -EINVAL, "no position information");
+ return;
+ }
+ sdl_format = SDL_PIXELFORMAT_RGBA32;
+ data->size = SPA_RECTANGLE(data->position->video.size.width,
+ data->position->video.size.height);
+ /* 4 floats per pixel */
+ mult = 4;
+ break;
+ default:
+ sdl_format = SDL_PIXELFORMAT_UNKNOWN;
+ break;
+ }
+
+ if (sdl_format == SDL_PIXELFORMAT_UNKNOWN) {
+ pw_stream_set_error(stream, -EINVAL, "unknown pixel format");
+ return;
+ }
+ if (data->size.width == 0 || data->size.height == 0) {
+ pw_stream_set_error(stream, -EINVAL, "invalid size");
+ return;
+ }
+
+ data->texture = SDL_CreateTexture(data->renderer,
+ sdl_format,
+ SDL_TEXTUREACCESS_STREAMING,
+ data->size.width,
+ data->size.height);
+ /* lock once only to learn the pitch SDL chose for this texture */
+ SDL_LockTexture(data->texture, NULL, &d, &data->stride);
+ SDL_UnlockTexture(data->texture);
+
+ switch(sdl_format) {
+ case SDL_PIXELFORMAT_YV12:
+ case SDL_PIXELFORMAT_IYUV:
+ /* planar YUV 4:2:0: one full luma plane + two quarter chroma planes */
+ size = (data->stride * data->size.height) * 3 / 2;
+ data->is_yuv = true;
+ break;
+ default:
+ size = data->stride * data->size.height;
+ break;
+ }
+
+ data->rect.x = 0;
+ data->rect.y = 0;
+ data->rect.w = data->size.width;
+ data->rect.h = data->size.height;
+
+ /* a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size,
+ * number, stride etc of the buffers */
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_Int(size * mult),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride * mult),
+ SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<<SPA_DATA_MemPtr)));
+
+ /* a header metadata with timing information */
+ params[1] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
+ SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
+ /* video cropping information */
+ params[2] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
+ SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
+#define CURSOR_META_SIZE(w,h) (sizeof(struct spa_meta_cursor) + \
+ sizeof(struct spa_meta_bitmap) + w * h * 4)
+ /* cursor information */
+ params[3] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
+ SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
+ CURSOR_META_SIZE(64,64),
+ CURSOR_META_SIZE(1,1),
+ CURSOR_META_SIZE(256,256)));
+
+ /* we are done */
+ pw_stream_update_params(stream, params, 4);
+}
+
+/* these are the stream events we listen for */
+static const struct pw_stream_events stream_events = {
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_stream_state_changed,
+ .io_changed = on_stream_io_changed,
+ .param_changed = on_stream_param_changed,
+ .process = on_process,
+};
+
+/* Build the EnumFormat params offered when connecting the stream.
+ *
+ * params[0]: every raw video format the SDL renderer can display,
+ *            derived from SDL_GetRendererInfo().
+ * params[1]: a video/dsp format with F32 pixels.
+ *
+ * Both params are also dumped to stderr for debugging.
+ * Returns the number of params written (always 2). */
+static int build_format(struct data *data, struct spa_pod_builder *b, const struct spa_pod **params)
+{
+	SDL_RendererInfo info;
+
+	SDL_GetRendererInfo(data->renderer, &info);
+	params[0] = sdl_build_formats(&info, b);
+
+	fprintf(stderr, "supported SDL formats:\n");
+	spa_debug_format(2, NULL, params[0]);
+
+	params[1] = spa_pod_builder_add_object(b,
+		SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+		SPA_FORMAT_mediaType,		SPA_POD_Id(SPA_MEDIA_TYPE_video),
+		SPA_FORMAT_mediaSubtype,	SPA_POD_Id(SPA_MEDIA_SUBTYPE_dsp),
+		SPA_FORMAT_VIDEO_format,	SPA_POD_Id(SPA_VIDEO_FORMAT_DSP_F32));
+
+	fprintf(stderr, "supported DSP formats:\n");
+	spa_debug_format(2, NULL, params[1]);
+
+	return 2;
+}
+
+/* SIGINT/SIGTERM handler: stop the main loop so main() falls through
+ * to its cleanup code */
+static void do_quit(void *userdata, int signal_number)
+{
+	struct data *data = userdata;
+	pw_main_loop_quit(data->loop);
+}
+
+/* Entry point: set up a main loop, an SDL window/renderer, and a
+ * video capture stream, then run until SIGINT/SIGTERM. */
+int main(int argc, char *argv[])
+{
+	struct data data = { 0, };
+	const struct spa_pod *params[2];
+	uint8_t buffer[1024];
+	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+	struct pw_properties *props;
+	int res, n_params;
+
+	pw_init(&argc, &argv);
+
+	/* create a main loop */
+	data.loop = pw_main_loop_new(NULL);
+
+	/* install handlers so we can exit nicely */
+	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+	/* create a simple stream, the simple stream manages to core and remote
+	 * objects for you if you don't need to deal with them
+	 *
+	 * If you plan to autoconnect your stream, you need to provide at least
+	 * media, category and role properties
+	 *
+	 * Pass your events and a user_data pointer as the last arguments. This
+	 * will inform you about the stream state. The most important event
+	 * you need to listen to is the process event where you need to consume
+	 * the data provided to you.
+	 */
+	props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+			PW_KEY_MEDIA_CATEGORY, "Capture",
+			PW_KEY_MEDIA_ROLE, "Camera",
+			NULL);
+	/* an optional target object (node name or serial) as first argument */
+	data.path = argc > 1 ? argv[1] : NULL;
+	if (data.path)
+		pw_properties_set(props, PW_KEY_TARGET_OBJECT, data.path);
+
+	data.stream = pw_stream_new_simple(
+			pw_main_loop_get_loop(data.loop),
+			"video-play",
+			props,
+			&stream_events,
+			&data);
+
+	if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+		fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	if (SDL_CreateWindowAndRenderer
+	    (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+		fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+		return -1;
+	}
+
+	/* build the extra parameters to connect with. To connect, we can provide
+	 * a list of supported formats. We use a builder that writes the param
+	 * object to the stack. */
+	n_params = build_format(&data, &b, params);
+
+	/* now connect the stream, we need a direction (input/output),
+	 * an optional target node to connect to, some flags and parameters
+	 */
+	if ((res = pw_stream_connect(data.stream,
+			  PW_DIRECTION_INPUT,
+			  PW_ID_ANY,
+			  PW_STREAM_FLAG_AUTOCONNECT |	/* try to automatically connect this stream */
+			  PW_STREAM_FLAG_INACTIVE |	/* we will activate ourselves */
+			  PW_STREAM_FLAG_MAP_BUFFERS,	/* mmap the buffer data for us */
+			  params, n_params))		/* extra parameters, see above */ < 0) {
+		fprintf(stderr, "can't connect: %s\n", spa_strerror(res));
+		return -1;
+	}
+
+	/* do things until we quit the mainloop */
+	pw_main_loop_run(data.loop);
+
+	pw_stream_destroy(data.stream);
+	pw_main_loop_destroy(data.loop);
+
+	/* SDL objects are destroyed after the stream that used them */
+	SDL_DestroyTexture(data.texture);
+	if (data.cursor)
+		SDL_DestroyTexture(data.cursor);
+	SDL_DestroyRenderer(data.renderer);
+	SDL_DestroyWindow(data.window);
+	pw_deinit();
+
+	return 0;
+}
diff --git a/src/examples/video-src-alloc.c b/src/examples/video-src-alloc.c
new file mode 100644
index 0000000..ef364fd
--- /dev/null
+++ b/src/examples/video-src-alloc.c
@@ -0,0 +1,464 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Allocating buffer memory and sending fds to the server.
+ [title]
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <errno.h>
+#include <signal.h>
+#include <math.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include <spa/param/video/format-utils.h>
+
+#include <pipewire/pipewire.h>
+
+#define BPP 3
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define CURSOR_BPP 4
+
+#define MAX_BUFFERS 64
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+struct data {
+	struct pw_thread_loop *loop;		/* thread loop driving everything */
+	struct spa_source *timer;		/* frame timer used when we drive the graph */
+
+	struct pw_stream *stream;
+	struct spa_hook stream_listener;
+
+	struct spa_video_info_raw format;	/* negotiated video format */
+	int32_t stride;				/* bytes per line (width * BPP, 4-aligned) */
+
+	int counter;				/* animation state for the test pattern */
+	uint32_t seq;				/* sequence number for header metadata */
+
+	double crop;				/* animated crop amount, in pixels */
+	double accumulator;			/* phase accumulator in [0, 2*pi) */
+};
+
+/* Fill a width x height ARGB pixel buffer with an ellipse in the given
+ * color; pixels outside the ellipse become fully transparent.
+ * Used to paint the cursor bitmap metadata (small sizes only; the
+ * intermediate r12*r22 product would overflow int for large buffers). */
+static void draw_elipse(uint32_t *dst, int width, int height, uint32_t color)
+{
+	int i, j, r1, r2, r12, r22, r122;
+
+	r1 = width/2;
+	r12 = r1 * r1;
+	r2 = height/2;
+	r22 = r2 * r2;
+	r122 = r12 * r22;
+
+	for (i = -r2; i < r2; i++) {
+		for (j = -r1; j < r1; j++) {
+			/* point is inside when i^2*r1^2 + j^2*r2^2 <= r1^2*r2^2 */
+			dst[(i + r2)*width+(j+r1)] =
+				(i * i * r12 + j * j * r22 <= r122) ? color : 0x00000000;
+		}
+	}
+}
+
+/* called when we should push a new buffer in the queue */
+static void on_process(void *userdata)
+{
+	struct data *data = userdata;
+	struct pw_buffer *b;
+	struct spa_buffer *buf;
+	uint32_t i, j;
+	uint8_t *p;
+	struct spa_meta *m;
+	struct spa_meta_header *h;
+	struct spa_meta_region *mc;
+	struct spa_meta_cursor *mcs;
+
+	if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
+		pw_log_warn("out of buffers: %m");
+		return;
+	}
+
+	buf = b->buffer;
+	if ((p = buf->datas[0].data) == NULL)
+		goto done;	/* requeue; a plain return would leak the buffer */
+
+	/* fill in the timing metadata when the server asked for it */
+	if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
+#if 0
+		struct timespec now;
+		clock_gettime(CLOCK_MONOTONIC, &now);
+		h->pts = SPA_TIMESPEC_TO_NSEC(&now);
+#else
+		h->pts = -1;
+#endif
+		h->flags = 0;
+		h->seq = data->seq++;
+		h->dts_offset = 0;
+	}
+	/* damage: report the whole frame changed, terminated by an empty region */
+	if ((m = spa_buffer_find_meta(buf, SPA_META_VideoDamage))) {
+		struct spa_meta_region *r = spa_meta_first(m);
+
+		if (spa_meta_check(r, m)) {
+			r->region.position = SPA_POINT(0,0);
+			r->region.size = data->format.size;
+			r++;
+		}
+		if (spa_meta_check(r, m))
+			r->region = SPA_REGION(0,0,0,0);
+	}
+	/* animate a crop rectangle that shrinks and grows */
+	if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc)))) {
+		data->crop = (sin(data->accumulator) + 1.0) * 32.0;
+		mc->region.position.x = data->crop;
+		mc->region.position.y = data->crop;
+		mc->region.size.width = data->format.size.width - data->crop*2;
+		mc->region.size.height = data->format.size.height - data->crop*2;
+	}
+	/* animate a cursor moving on a lissajous path, with an inline bitmap */
+	if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs)))) {
+		struct spa_meta_bitmap *mb;
+		uint32_t *bitmap, color;
+
+		mcs->id = 1;
+		mcs->position.x = (sin(data->accumulator) + 1.0) * 160.0 + 80;
+		mcs->position.y = (cos(data->accumulator) + 1.0) * 100.0 + 50;
+		mcs->hotspot.x = 0;
+		mcs->hotspot.y = 0;
+		mcs->bitmap_offset = sizeof(struct spa_meta_cursor);
+
+		mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
+		mb->format = SPA_VIDEO_FORMAT_ARGB;
+		mb->size.width = CURSOR_WIDTH;
+		mb->size.height = CURSOR_HEIGHT;
+		mb->stride = CURSOR_WIDTH * CURSOR_BPP;
+		mb->offset = sizeof(struct spa_meta_bitmap);
+
+		bitmap = SPA_PTROFF(mb, mb->offset, uint32_t);
+		color = (cos(data->accumulator) + 1.0) * (1 << 23);
+		color |= 0xff000000;
+
+		draw_elipse(bitmap, mb->size.width, mb->size.height, color);
+	}
+
+	/* paint a moving test pattern into the pixel data */
+	for (i = 0; i < data->format.size.height; i++) {
+		for (j = 0; j < data->format.size.width * BPP; j++) {
+			p[j] = data->counter + j * i;
+		}
+		p += data->stride;
+		data->counter += 13;
+	}
+
+	data->accumulator += M_PI_M2 / 50.0;
+	if (data->accumulator >= M_PI_M2)
+		data->accumulator -= M_PI_M2;
+
+	buf->datas[0].chunk->offset = 0;
+	buf->datas[0].chunk->size = data->format.size.height * data->stride;
+	buf->datas[0].chunk->stride = data->stride;
+
+done:
+	/* always hand the buffer back, even when we could not fill it */
+	pw_stream_queue_buffer(data->stream, b);
+}
+
+/* trigger the graph when we are a driver; runs from the timer armed in
+ * on_stream_state_changed() and causes on_process() to be called */
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+	struct data *data = userdata;
+	pw_log_trace("timeout");
+	pw_stream_trigger_process(data->stream);
+}
+
+/* when the stream is STREAMING, start the timer at 40ms intervals
+ * to produce and push a frame. In other states we PAUSE the timer. */
+static void on_stream_state_changed(void *_data, enum pw_stream_state old, enum pw_stream_state state,
+				    const char *error)
+{
+	struct data *data = _data;
+
+	printf("stream state: \"%s\"\n", pw_stream_state_as_string(state));
+
+	switch (state) {
+	case PW_STREAM_STATE_PAUSED:
+		printf("node id: %d\n", pw_stream_get_node_id(data->stream));
+		/* NULL timeout disarms the timer */
+		pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
+				data->timer, NULL, NULL, false);
+		break;
+	case PW_STREAM_STATE_STREAMING:
+	{
+		struct timespec timeout, interval;
+
+		/* fire almost immediately, then every 40ms (25 fps) */
+		timeout.tv_sec = 0;
+		timeout.tv_nsec = 1;
+		interval.tv_sec = 0;
+		interval.tv_nsec = 40 * SPA_NSEC_PER_MSEC;
+
+		/* only arm the timer when we drive the graph ourselves;
+		 * otherwise another driver triggers our process callback */
+		if (pw_stream_is_driving(data->stream))
+			pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
+					data->timer, &timeout, &interval, false);
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+/* we set the PW_STREAM_FLAG_ALLOC_BUFFERS flag when connecting so we need
+ * to provide buffer memory. Here we back each buffer with a sealed,
+ * mmap()ed memfd; on_stream_remove_buffer() undoes this. */
+static void on_stream_add_buffer(void *_data, struct pw_buffer *buffer)
+{
+	struct data *data = _data;
+	struct spa_buffer *buf = buffer->buffer;
+	struct spa_data *d;
+#ifdef HAVE_MEMFD_CREATE
+	unsigned int seals;
+#endif
+
+	pw_log_info("add buffer %p", buffer);
+	d = buf->datas;
+
+	/* at this point d[0].type is still a bitmask of allowed data types */
+	if ((d[0].type & (1<<SPA_DATA_MemFd)) == 0) {
+		pw_log_error("unsupported data type %08x", d[0].type);
+		return;
+	}
+
+	/* create the memfd on the buffer, set the type and flags */
+	d[0].type = SPA_DATA_MemFd;
+	d[0].flags = SPA_DATA_FLAG_READWRITE;
+#ifdef HAVE_MEMFD_CREATE
+	d[0].fd = memfd_create("video-src-memfd", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+#else
+	d[0].fd = -1;
+#endif
+	if (d[0].fd == -1) {
+		pw_log_error("can't create memfd: %m");
+		return;
+	}
+	d[0].mapoffset = 0;
+	d[0].maxsize = data->stride * data->format.size.height;
+
+	/* truncate to the right size before we set seals */
+	if (ftruncate(d[0].fd, d[0].maxsize) < 0) {
+		pw_log_error("can't truncate to %d: %m", d[0].maxsize);
+		close(d[0].fd);
+		d[0].fd = -1;
+		return;
+	}
+#ifdef HAVE_MEMFD_CREATE
+	/* not enforced yet but server might require SEAL_SHRINK later */
+	seals = F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL;
+	if (fcntl(d[0].fd, F_ADD_SEALS, seals) == -1) {
+		pw_log_warn("Failed to add seals: %m");
+	}
+#endif
+
+	/* now mmap so we can write to it in the process function above */
+	d[0].data = mmap(NULL, d[0].maxsize, PROT_READ|PROT_WRITE,
+			MAP_SHARED, d[0].fd, d[0].mapoffset);
+	if (d[0].data == MAP_FAILED) {
+		pw_log_error("can't mmap memory: %m");
+		/* don't leak the fd, and don't leave MAP_FAILED behind for
+		 * on_stream_remove_buffer() to munmap */
+		d[0].data = NULL;
+		close(d[0].fd);
+		d[0].fd = -1;
+		return;
+	}
+}
+
+/* close the memfd we set on the buffers here; undoes the mmap() and
+ * memfd_create() done in on_stream_add_buffer() */
+static void on_stream_remove_buffer(void *_data, struct pw_buffer *buffer)
+{
+	struct spa_buffer *buf = buffer->buffer;
+	struct spa_data *d;
+
+	d = buf->datas;
+	pw_log_info("remove buffer %p", buffer);
+
+	munmap(d[0].data, d[0].maxsize);
+	close(d[0].fd);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format param.
+ *
+ * We are now supposed to call pw_stream_update_params() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_update_params() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+	struct data *data = _data;
+	struct pw_stream *stream = data->stream;
+	uint8_t params_buffer[1024];
+	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+	const struct spa_pod *params[5];
+
+	/* NULL means to clear the format */
+	if (param == NULL || id != SPA_PARAM_Format)
+		return;
+
+	spa_format_video_raw_parse(param, &data->format);
+
+	/* RGB lines rounded up to a 4-byte boundary */
+	data->stride = SPA_ROUND_UP_N(data->format.size.width * BPP, 4);
+
+	/* buffer requirements: 2..MAX_BUFFERS buffers (8 preferred), one
+	 * data block of memfd memory, one full frame each */
+	params[0] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+		SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+		SPA_PARAM_BUFFERS_blocks,  SPA_POD_Int(1),
+		SPA_PARAM_BUFFERS_size,    SPA_POD_Int(data->stride * data->format.size.height),
+		SPA_PARAM_BUFFERS_stride,  SPA_POD_Int(data->stride),
+		SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(1<<SPA_DATA_MemFd));
+
+	/* timing metadata on each buffer */
+	params[1] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
+		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
+
+	/* between 1 and 16 damage regions (16 preferred) */
+	params[2] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
+		SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
+					sizeof(struct spa_meta_region) * 16,
+					sizeof(struct spa_meta_region) * 1,
+					sizeof(struct spa_meta_region) * 16));
+	/* video cropping information */
+	params[3] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
+		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
+#define CURSOR_META_SIZE(w,h)	(sizeof(struct spa_meta_cursor) + \
+				 sizeof(struct spa_meta_bitmap) + w * h * CURSOR_BPP)
+	/* cursor plus inline cursor bitmap */
+	params[4] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
+		SPA_PARAM_META_size, SPA_POD_Int(
+			CURSOR_META_SIZE(CURSOR_WIDTH,CURSOR_HEIGHT)));
+
+	pw_stream_update_params(stream, params, 5);
+}
+
+/* the stream events we listen for */
+static const struct pw_stream_events stream_events = {
+	PW_VERSION_STREAM_EVENTS,
+	.process = on_process,			/* produce a frame */
+	.state_changed = on_stream_state_changed,	/* arm/disarm the frame timer */
+	.param_changed = on_stream_param_changed,	/* negotiate format and buffers */
+	.add_buffer = on_stream_add_buffer,	/* allocate memfd backing */
+	.remove_buffer = on_stream_remove_buffer,	/* release memfd backing */
+};
+
+/* SIGINT/SIGTERM handler: wake up the pw_thread_loop_wait() in main()
+ * so it can tear everything down */
+static void do_quit(void *userdata, int signal_number)
+{
+	struct data *data = userdata;
+	pw_thread_loop_signal(data->loop, false);
+}
+
+/* Entry point: create a thread loop and a driving video source stream
+ * with self-allocated (memfd) buffers, then wait for a signal to quit. */
+int main(int argc, char *argv[])
+{
+	struct data data = { 0, };
+	const struct spa_pod *params[1];
+	uint8_t buffer[1024];
+	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+	int res;
+
+	pw_init(&argc, &argv);
+
+	/* create a thread loop and start it */
+	data.loop = pw_thread_loop_new("video-src-alloc", NULL);
+
+	/* take the lock around all PipeWire functions. In callbacks, the lock
+	 * is already taken for you but it's ok to lock again because the lock is
+	 * recursive */
+	pw_thread_loop_lock(data.loop);
+
+	/* install some handlers to exit nicely */
+	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+	/* start after the signal handlers are set */
+	pw_thread_loop_start(data.loop);
+
+	/* create a simple stream, the simple stream manages the core
+	 * object for you if you don't want to deal with them.
+	 *
+	 * We're making a new video provider. We need to set the media-class
+	 * property.
+	 *
+	 * Pass your events and a user_data pointer as the last arguments. This
+	 * will inform you about the stream state. The most important event
+	 * you need to listen to is the process event where you need to provide
+	 * the data.
+	 */
+	data.stream = pw_stream_new_simple(
+			pw_thread_loop_get_loop(data.loop),
+			"video-src-alloc",
+			pw_properties_new(
+				PW_KEY_MEDIA_CLASS, "Video/Source",
+				NULL),
+			&stream_events,
+			&data);
+
+	/* make a timer to schedule our frames */
+	data.timer = pw_loop_add_timer(pw_thread_loop_get_loop(data.loop), on_timeout, &data);
+
+	/* build the extra parameter for the connection. Here we make an
+	 * EnumFormat parameter which lists the possible formats we can provide.
+	 * The server will select a format that matches and informs us about this
+	 * in the stream param_changed event.
+	 */
+	params[0] = spa_pod_builder_add_object(&b,
+		SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+		SPA_FORMAT_mediaType,       SPA_POD_Id(SPA_MEDIA_TYPE_video),
+		SPA_FORMAT_mediaSubtype,    SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+		SPA_FORMAT_VIDEO_format,    SPA_POD_Id(SPA_VIDEO_FORMAT_RGB),
+		SPA_FORMAT_VIDEO_size,      SPA_POD_CHOICE_RANGE_Rectangle(
+						&SPA_RECTANGLE(320, 240),
+						&SPA_RECTANGLE(1, 1),
+						&SPA_RECTANGLE(4096, 4096)),
+		SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&SPA_FRACTION(25, 1)));
+
+	/* now connect the stream, we need a direction (input/output),
+	 * an optional target node to connect to, some flags and parameters.
+	 *
+	 * Here we pass PW_STREAM_FLAG_ALLOC_BUFFERS. We should in the
+	 * add_buffer callback configure the buffer memory. This should be
+	 * fd backed memory (memfd, dma-buf, ...) that can be shared with
+	 * the server. */
+	if ((res = pw_stream_connect(data.stream,
+			PW_DIRECTION_OUTPUT,
+			PW_ID_ANY,
+			PW_STREAM_FLAG_DRIVER |
+			PW_STREAM_FLAG_ALLOC_BUFFERS,
+			params, 1)) < 0) {
+		/* don't wait forever on a stream that could never start */
+		fprintf(stderr, "can't connect: %d\n", res);
+		pw_thread_loop_unlock(data.loop);
+		pw_thread_loop_stop(data.loop);
+		pw_stream_destroy(data.stream);
+		pw_thread_loop_destroy(data.loop);
+		pw_deinit();
+		return -1;
+	}
+
+	/* unlock, run the loop and wait, this will trigger the callbacks */
+	pw_thread_loop_wait(data.loop);
+
+	/* unlock before stop */
+	pw_thread_loop_unlock(data.loop);
+	pw_thread_loop_stop(data.loop);
+
+	pw_stream_destroy(data.stream);
+
+	/* destroy after dependent objects are destroyed */
+	pw_thread_loop_destroy(data.loop);
+	pw_deinit();
+
+	return 0;
+}
diff --git a/src/examples/video-src-fixate.c b/src/examples/video-src-fixate.c
new file mode 100644
index 0000000..fdcc666
--- /dev/null
+++ b/src/examples/video-src-fixate.c
@@ -0,0 +1,602 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Fixating negotiated modifiers.
+ [title]
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <errno.h>
+#include <signal.h>
+#include <math.h>
+#include <libdrm/drm_fourcc.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include <spa/param/video/format-utils.h>
+#include <spa/debug/format.h>
+
+#include <pipewire/pipewire.h>
+
+#define BPP 3
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define CURSOR_BPP 4
+
+#define MAX_BUFFERS 64
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+/* the DRM modifiers this example offers: implicit (INVALID) and linear */
+uint64_t supported_modifiers[] = {DRM_FORMAT_MOD_INVALID, DRM_FORMAT_MOD_LINEAR};
+
+struct data {
+	struct pw_thread_loop *loop;		/* thread loop driving everything */
+	struct spa_source *timer;		/* frame timer used when we drive the graph */
+
+	struct pw_stream *stream;
+	struct spa_hook stream_listener;
+
+	struct spa_video_info_raw format;	/* negotiated video format */
+	int32_t stride;				/* bytes per line (width * BPP, 4-aligned) */
+
+	int counter;				/* animation state for the test pattern */
+	uint32_t seq;				/* sequence number for header metadata */
+
+	double crop;				/* animated crop amount, in pixels */
+	double accumulator;			/* phase accumulator in [0, 2*pi) */
+};
+
+/* Fill a width x height ARGB pixel buffer with an ellipse in the given
+ * color; pixels outside the ellipse become fully transparent.
+ * Used to paint the cursor bitmap metadata (small sizes only; the
+ * intermediate r12*r22 product would overflow int for large buffers). */
+static void draw_elipse(uint32_t *dst, int width, int height, uint32_t color)
+{
+	int i, j, r1, r2, r12, r22, r122;
+
+	r1 = width/2;
+	r12 = r1 * r1;
+	r2 = height/2;
+	r22 = r2 * r2;
+	r122 = r12 * r22;
+
+	for (i = -r2; i < r2; i++) {
+		for (j = -r1; j < r1; j++) {
+			/* point is inside when i^2*r1^2 + j^2*r2^2 <= r1^2*r2^2 */
+			dst[(i + r2)*width+(j+r1)] =
+				(i * i * r12 + j * j * r22 <= r122) ? color : 0x00000000;
+		}
+	}
+}
+
+/* Build an EnumFormat pod that pins down exactly one modifier.
+ * Once the modifier choice has been fixated we re-announce our formats
+ * with this pod first, which ends the fixation phase. */
+static struct spa_pod *fixate_format(struct spa_pod_builder *b, enum spa_video_format format,
+		uint64_t *modifier)
+{
+	struct spa_pod_frame f[1];
+
+	spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+	/* format */
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+	/* modifier: a single fixed value, marked mandatory */
+	if (modifier) {
+		spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY);
+		spa_pod_builder_long(b, *modifier);
+	}
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
+		SPA_POD_CHOICE_RANGE_Rectangle(
+			&SPA_RECTANGLE(320, 240),
+			&SPA_RECTANGLE(1,1),
+			&SPA_RECTANGLE(4096,4096)),
+		0);
+	// variable framerate
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_framerate,
+		SPA_POD_Fraction(&SPA_FRACTION(25, 1)), 0);
+	return spa_pod_builder_pop(b, &f[0]);
+}
+
+/* Build an EnumFormat pod offering `format` with a list of acceptable
+ * modifiers (may be empty). With several modifiers the property is
+ * flagged DONT_FIXATE so the server starts a fixation round-trip. */
+static struct spa_pod *build_format(struct spa_pod_builder *b, enum spa_video_format format,
+		uint64_t *modifiers, int modifier_count)
+{
+	struct spa_pod_frame f[2];
+	int i, c;
+
+	spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+	spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+	/* format */
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+	/* modifiers */
+	if (modifier_count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+		// we only support implicit modifiers, use shortpath to skip fixation phase
+		spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY);
+		spa_pod_builder_long(b, modifiers[0]);
+	} else if (modifier_count > 0) {
+		// build an enumeration of modifiers
+		spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
+		spa_pod_builder_push_choice(b, &f[1], SPA_CHOICE_Enum, 0);
+		// modifiers from the array; the first value is written twice
+		// because a choice pod starts with the default value before
+		// the enumeration entries
+		for (i = 0, c = 0; i < modifier_count; i++) {
+			spa_pod_builder_long(b, modifiers[i]);
+			if (c++ == 0)
+				spa_pod_builder_long(b, modifiers[i]);
+		}
+		spa_pod_builder_pop(b, &f[1]);
+	}
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
+		SPA_POD_CHOICE_RANGE_Rectangle(
+			&SPA_RECTANGLE(320, 240),
+			&SPA_RECTANGLE(1,1),
+			&SPA_RECTANGLE(4096,4096)),
+		0);
+	// variable framerate
+	spa_pod_builder_add(b, SPA_FORMAT_VIDEO_framerate,
+		SPA_POD_Fraction(&SPA_FRACTION(25, 1)), 0);
+	return spa_pod_builder_pop(b, &f[0]);
+}
+
+/* called when we should push a new buffer in the queue */
+static void on_process(void *userdata)
+{
+	struct data *data = userdata;
+	struct pw_buffer *b;
+	struct spa_buffer *buf;
+	uint32_t i, j;
+	uint8_t *p;
+	struct spa_meta *m;
+	struct spa_meta_header *h;
+	struct spa_meta_region *mc;
+	struct spa_meta_cursor *mcs;
+
+	if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
+		pw_log_warn("out of buffers: %m");
+		return;
+	}
+
+	buf = b->buffer;
+	/* no data pointer for the fake dma-buf case; still requeue */
+	if ((p = buf->datas[0].data) == NULL) {
+		printf("No data ptr\n");
+		goto done;
+	}
+
+	/* fill in the timing metadata when the server asked for it */
+	if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
+#if 0
+		struct timespec now;
+		clock_gettime(CLOCK_MONOTONIC, &now);
+		h->pts = SPA_TIMESPEC_TO_NSEC(&now);
+#else
+		h->pts = -1;
+#endif
+		h->flags = 0;
+		h->seq = data->seq++;
+		h->dts_offset = 0;
+	}
+	/* damage: report the whole frame changed, terminated by an empty region */
+	if ((m = spa_buffer_find_meta(buf, SPA_META_VideoDamage))) {
+		struct spa_meta_region *r = spa_meta_first(m);
+
+		if (spa_meta_check(r, m)) {
+			r->region.position = SPA_POINT(0,0);
+			r->region.size = data->format.size;
+			r++;
+		}
+		if (spa_meta_check(r, m))
+			r->region = SPA_REGION(0,0,0,0);
+	}
+	/* animate a crop rectangle that shrinks and grows */
+	if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc)))) {
+		data->crop = (sin(data->accumulator) + 1.0) * 32.0;
+		mc->region.position.x = data->crop;
+		mc->region.position.y = data->crop;
+		mc->region.size.width = data->format.size.width - data->crop*2;
+		mc->region.size.height = data->format.size.height - data->crop*2;
+	}
+	/* animate a cursor moving on a lissajous path, with an inline bitmap */
+	if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs)))) {
+		struct spa_meta_bitmap *mb;
+		uint32_t *bitmap, color;
+
+		mcs->id = 1;
+		mcs->position.x = (sin(data->accumulator) + 1.0) * 160.0 + 80;
+		mcs->position.y = (cos(data->accumulator) + 1.0) * 100.0 + 50;
+		mcs->hotspot.x = 0;
+		mcs->hotspot.y = 0;
+		mcs->bitmap_offset = sizeof(struct spa_meta_cursor);
+
+		mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
+		mb->format = SPA_VIDEO_FORMAT_ARGB;
+		mb->size.width = CURSOR_WIDTH;
+		mb->size.height = CURSOR_HEIGHT;
+		mb->stride = CURSOR_WIDTH * CURSOR_BPP;
+		mb->offset = sizeof(struct spa_meta_bitmap);
+
+		bitmap = SPA_PTROFF(mb, mb->offset, uint32_t);
+		color = (cos(data->accumulator) + 1.0) * (1 << 23);
+		color |= 0xff000000;
+
+		draw_elipse(bitmap, mb->size.width, mb->size.height, color);
+	}
+
+	/* paint a moving test pattern into the pixel data */
+	for (i = 0; i < data->format.size.height; i++) {
+		for (j = 0; j < data->format.size.width * BPP; j++) {
+			p[j] = data->counter + j * i;
+		}
+		p += data->stride;
+		data->counter += 13;
+	}
+
+	data->accumulator += M_PI_M2 / 50.0;
+	if (data->accumulator >= M_PI_M2)
+		data->accumulator -= M_PI_M2;
+
+	buf->datas[0].chunk->offset = 0;
+	buf->datas[0].chunk->size = data->format.size.height * data->stride;
+	buf->datas[0].chunk->stride = data->stride;
+
+done:
+	/* always hand the buffer back, even when we could not fill it */
+	pw_stream_queue_buffer(data->stream, b);
+}
+
+/* trigger the graph when we are a driver; runs from the timer armed in
+ * on_stream_state_changed() and causes on_process() to be called */
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+	struct data *data = userdata;
+	pw_log_trace("timeout");
+	pw_stream_trigger_process(data->stream);
+}
+
+/* when the stream is STREAMING, start the timer at 40ms intervals
+ * to produce and push a frame. In other states we PAUSE the timer. */
+static void on_stream_state_changed(void *_data, enum pw_stream_state old, enum pw_stream_state state,
+				    const char *error)
+{
+	struct data *data = _data;
+
+	printf("stream state: \"%s\"\n", pw_stream_state_as_string(state));
+
+	switch (state) {
+	case PW_STREAM_STATE_PAUSED:
+		printf("node id: %d\n", pw_stream_get_node_id(data->stream));
+		/* NULL timeout disarms the timer */
+		pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
+				data->timer, NULL, NULL, false);
+		break;
+	case PW_STREAM_STATE_STREAMING:
+	{
+		struct timespec timeout, interval;
+
+		/* fire almost immediately, then every 40ms (25 fps) */
+		timeout.tv_sec = 0;
+		timeout.tv_nsec = 1;
+		interval.tv_sec = 0;
+		interval.tv_nsec = 40 * SPA_NSEC_PER_MSEC;
+
+		/* only arm the timer when we drive the graph ourselves;
+		 * otherwise another driver triggers our process callback */
+		if (pw_stream_is_driving(data->stream))
+			pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
+					data->timer, &timeout, &interval, false);
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+/* we set the PW_STREAM_FLAG_ALLOC_BUFFERS flag when connecting so we need
+ * to provide buffer memory: either a fake dma-buf (fd -1) or a sealed,
+ * mmap()ed memfd. on_stream_remove_buffer() undoes this. */
+static void on_stream_add_buffer(void *_data, struct pw_buffer *buffer)
+{
+	printf("add_buffer\n");
+	struct data *data = _data;
+	struct spa_buffer *buf = buffer->buffer;
+	struct spa_data *d;
+#ifdef HAVE_MEMFD_CREATE
+	unsigned int seals;
+#endif
+
+	pw_log_info("add buffer %p", buffer);
+	d = buf->datas;
+
+	/* at this point d[0].type is still a bitmask of allowed data types */
+	if ((d[0].type & (1<<SPA_DATA_DmaBuf)) > 0) {
+		printf("pretend to support dmabufs while setting the fd to -1\n");
+		d[0].type = SPA_DATA_DmaBuf;
+		d[0].fd = -1;
+		d[0].data = NULL;
+		return;
+	}
+
+	if ((d[0].type & (1<<SPA_DATA_MemFd)) == 0) {
+		pw_log_error("unsupported data type %08x", d[0].type);
+		return;
+	}
+
+	printf("use memfd\n");
+	/* create the memfd on the buffer, set the type and flags */
+	d[0].type = SPA_DATA_MemFd;
+	d[0].flags = SPA_DATA_FLAG_READWRITE;
+#ifdef HAVE_MEMFD_CREATE
+	d[0].fd = memfd_create("video-src-fixate-memfd", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+#else
+	d[0].fd = -1;
+#endif
+	if (d[0].fd == -1) {
+		pw_log_error("can't create memfd: %m");
+		return;
+	}
+	d[0].mapoffset = 0;
+	d[0].maxsize = data->stride * data->format.size.height;
+
+	/* truncate to the right size before we set seals */
+	if (ftruncate(d[0].fd, d[0].maxsize) < 0) {
+		pw_log_error("can't truncate to %d: %m", d[0].maxsize);
+		close(d[0].fd);
+		d[0].fd = -1;
+		return;
+	}
+#ifdef HAVE_MEMFD_CREATE
+	/* not enforced yet but server might require SEAL_SHRINK later */
+	seals = F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL;
+	if (fcntl(d[0].fd, F_ADD_SEALS, seals) == -1) {
+		pw_log_warn("Failed to add seals: %m");
+	}
+#endif
+
+	/* now mmap so we can write to it in the process function above */
+	d[0].data = mmap(NULL, d[0].maxsize, PROT_READ|PROT_WRITE,
+			MAP_SHARED, d[0].fd, d[0].mapoffset);
+	if (d[0].data == MAP_FAILED) {
+		pw_log_error("can't mmap memory: %m");
+		/* don't leak the fd, and don't leave MAP_FAILED behind for
+		 * on_stream_remove_buffer() to munmap */
+		d[0].data = NULL;
+		close(d[0].fd);
+		d[0].fd = -1;
+		return;
+	}
+}
+
+/* close the memfd we set on the buffers here */
+static void on_stream_remove_buffer(void *_data, struct pw_buffer *buffer)
+{
+	printf("remove_buffer\n");
+	struct spa_buffer *buf = buffer->buffer;
+	struct spa_data *d;
+
+	d = buf->datas;
+	pw_log_info("remove buffer %p", buffer);
+	/* After on_stream_add_buffer(), d[0].type holds a single enum value
+	 * (SPA_DATA_DmaBuf or SPA_DATA_MemFd), not a bitmask; the previous
+	 * `type & (1<<SPA_DATA_DmaBuf)` test was false for both values and
+	 * leaked every memfd mapping. For the fake dma-buf case there is
+	 * nothing to release (fd -1, data NULL). */
+	if (d[0].type == SPA_DATA_DmaBuf)
+		return;
+
+	munmap(d[0].data, d[0].maxsize);
+	close(d[0].fd);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format param.
+ *
+ * We are now supposed to call pw_stream_update_params() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_update_params() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
static void
on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
{
	struct data *data = _data;
	struct pw_stream *stream = data->stream;
	uint8_t params_buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
	const struct spa_pod *params[5];
	int blocks, size, stride, buffertypes;

	/* only interested in the negotiated format; NULL clears the param */
	if (param == NULL || id != SPA_PARAM_Format)
		return;

	printf("param changed: \n");
	spa_debug_format(4, NULL, param);

	spa_format_video_raw_parse(param, &data->format);

	/* bytes per line: width * bytes-per-pixel, rounded up to a multiple of 4 */
	data->stride = SPA_ROUND_UP_N(data->format.size.width * BPP, 4);

	const struct spa_pod_prop *prop_modifier;
	// check if client supports modifier
	if ((prop_modifier = spa_pod_find_prop(param, NULL, SPA_FORMAT_VIDEO_modifier)) == NULL) {
		/* no modifier negotiated: plain shared-memory (memfd) path */
		blocks = 1;
		size = data->stride * data->format.size.height;
		stride = data->stride;
		buffertypes = (1<<SPA_DATA_MemFd);
	} else {
		// check if the modifier is fixated
		if ((prop_modifier->flags & SPA_POD_PROP_FLAG_DONT_FIXATE) > 0) {
			/* the consumer offered a choice of modifiers: pick one,
			 * re-announce EnumFormat with the fixated format first,
			 * then wait for the next param_changed round trip */
			const struct spa_pod *pod_modifier = &prop_modifier->value;
			printf("fixating format\n");

			uint32_t n_modifiers = SPA_POD_CHOICE_N_VALUES(pod_modifier);
			uint64_t *modifiers = SPA_POD_CHOICE_VALUES(pod_modifier);
			uint64_t modifier;
			// shortcut for the old gbm allocator path
			if (n_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
				modifier = modifiers[0];
			} else {
				// Use the allocator to find the best modifier from the list
				/* NOTE(review): this example just picks a random entry;
				 * a real producer would ask its allocator here */
				modifier = modifiers[rand()%n_modifiers];
			}

			params[0] = fixate_format(&b, SPA_VIDEO_FORMAT_RGB, &modifier);

			params[1] = build_format(&b, SPA_VIDEO_FORMAT_RGB,
					supported_modifiers, sizeof(supported_modifiers)/sizeof(supported_modifiers[0]));
			params[2] = build_format(&b, SPA_VIDEO_FORMAT_RGB,
					NULL, 0);

			printf("announcing fixated EnumFormats\n");
			for (unsigned int i=0; i < 3; i++) {
				spa_debug_format(4, NULL, params[i]);
			}

			pw_stream_update_params(stream, params, 3);
			return;
		}
		/* the modifier is already a single fixed value: DmaBuf path */
		printf("no fixation required\n");
		blocks = 1;
		size = data->stride * data->format.size.height;
		stride = data->stride;
		buffertypes = (1<<SPA_DATA_DmaBuf);
	}

	/* buffer layout: 2..MAX_BUFFERS buffers (8 preferred) of the chosen type */
	params[0] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
		SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
		SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(blocks),
		SPA_PARAM_BUFFERS_size, SPA_POD_Int(size),
		SPA_PARAM_BUFFERS_stride, SPA_POD_Int(stride),
		SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(buffertypes));

	/* header metadata (pts/seq/flags), filled in by on_process */
	params[1] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));

	/* video damage regions: room for 1..16 rectangles */
	params[2] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
		SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
					sizeof(struct spa_meta_region) * 16,
					sizeof(struct spa_meta_region) * 1,
					sizeof(struct spa_meta_region) * 16));
	/* a single crop rectangle */
	params[3] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
#define CURSOR_META_SIZE(w,h)	(sizeof(struct spa_meta_cursor) + \
				 sizeof(struct spa_meta_bitmap) + w * h * CURSOR_BPP)
	/* cursor metadata including an ARGB bitmap */
	params[4] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
		SPA_PARAM_META_size, SPA_POD_Int(
					CURSOR_META_SIZE(CURSOR_WIDTH,CURSOR_HEIGHT)));

	pw_stream_update_params(stream, params, 5);
}
+
/* stream callbacks: we produce frames from a timer (process), manage our
 * own buffer memory (add/remove_buffer) and negotiate formats and buffer
 * layout in param_changed */
static const struct pw_stream_events stream_events = {
	PW_VERSION_STREAM_EVENTS,
	.process = on_process,
	.state_changed = on_stream_state_changed,
	.param_changed = on_stream_param_changed,
	.add_buffer = on_stream_add_buffer,
	.remove_buffer = on_stream_remove_buffer,
};
+
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_thread_loop_signal(data->loop, false);
+}
+
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[2];
	uint8_t buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	/* fixed seed: on_stream_param_changed picks a "random" modifier,
	 * this keeps runs deterministic */
	srand(32);

	pw_init(&argc, &argv);

	/* create a thread loop and start it */
	data.loop = pw_thread_loop_new("video-src-fixate", NULL);

	/* take the lock around all PipeWire functions. In callbacks, the lock
	 * is already taken for you but it's ok to lock again because the lock is
	 * recursive */
	pw_thread_loop_lock(data.loop);

	/* install some handlers to exit nicely */
	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* start after the signal handlers are set */
	pw_thread_loop_start(data.loop);

	/* create a simple stream, the simple stream manages the core
	 * object for you if you don't want to deal with them.
	 *
	 * We're making a new video provider. We need to set the media-class
	 * property.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the stream state. The most important event
	 * you need to listen to is the process event where you need to provide
	 * the data.
	 */
	data.stream = pw_stream_new_simple(
			pw_thread_loop_get_loop(data.loop),
			"video-src-fixate",
			pw_properties_new(
				PW_KEY_MEDIA_CLASS, "Video/Source",
				NULL),
			&stream_events,
			&data);

	/* make a timer to schedule our frames */
	data.timer = pw_loop_add_timer(pw_thread_loop_get_loop(data.loop), on_timeout, &data);

	/* build the extra parameter for the connection. Here we make an
	 * EnumFormat parameter which lists the possible formats we can provide.
	 * The server will select a format that matches and informs us about this
	 * in the stream param_changed event. First entry offers the modifier
	 * list, second is the fallback without modifiers.
	 */
	params[0] = build_format(&b, SPA_VIDEO_FORMAT_RGB,
			supported_modifiers, sizeof(supported_modifiers)/sizeof(supported_modifiers[0]));
	params[1] = build_format(&b, SPA_VIDEO_FORMAT_RGB, NULL, 0);

	printf("announcing starting EnumFormats\n");
	for (unsigned int i=0; i < 2; i++) {
		spa_debug_format(4, NULL, params[i]);
	}

	/* now connect the stream, we need a direction (input/output),
	 * an optional target node to connect to, some flags and parameters.
	 *
	 * Here we pass PW_STREAM_FLAG_ALLOC_BUFFERS. We should in the
	 * add_buffer callback configure the buffer memory. This should be
	 * fd backed memory (memfd, dma-buf, ...) that can be shared with
	 * the server. */
	pw_stream_connect(data.stream,
			  PW_DIRECTION_OUTPUT,
			  PW_ID_ANY,
			  PW_STREAM_FLAG_DRIVER |
			  PW_STREAM_FLAG_ALLOC_BUFFERS,
			  params, 2);

	/* block here until do_quit signals; pw_thread_loop_wait releases the
	 * lock while waiting and re-acquires it before returning */
	pw_thread_loop_wait(data.loop);

	/* unlock before stop */
	pw_thread_loop_unlock(data.loop);
	pw_thread_loop_stop(data.loop);

	pw_stream_destroy(data.stream);

	/* destroy after dependent objects are destroyed */
	pw_thread_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/video-src-reneg.c b/src/examples/video-src-reneg.c
new file mode 100644
index 0000000..172e7dc
--- /dev/null
+++ b/src/examples/video-src-reneg.c
@@ -0,0 +1,509 @@
+/* PipeWire
+ *
+ * Copyright © 2020 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Renegotiating video producer and consumer formats with \ref pw_stream
+ [title]
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <errno.h>
+#include <signal.h>
+#include <math.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include <spa/param/video/format-utils.h>
+
+#include <pipewire/pipewire.h>
+
+#define BPP 3
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define CURSOR_BPP 4
+
+#define MAX_BUFFERS 64
+
+#define M_PI_M2 ( M_PI + M_PI )
+
/* per-instance state shared between main() and the stream callbacks */
struct data {
	struct pw_thread_loop *loop;		/* thread loop driving everything */
	struct spa_source *timer;		/* 40ms frame-production timer */
	struct spa_source *reneg_timer;		/* 1s renegotiation timer */

	struct pw_stream *stream;
	struct spa_hook stream_listener;

	struct spa_video_info_raw format;	/* currently negotiated format */
	int32_t stride;				/* bytes per line (width*BPP rounded up to 4) */

	int counter;				/* animation state for the test pattern */
	int cycle;				/* renegotiation count; parity selects the size */
	uint32_t seq;				/* sequence number for SPA_META_Header */

	double crop;				/* animated crop written to SPA_META_VideoCrop */
	double accumulator;			/* phase accumulator for the animations */
};
+
/* Fill the width x height pixel buffer dst with a solid ellipse in the
 * given color; pixels outside the ellipse are set to fully transparent.
 * The ellipse semi-axes are width/2 and height/2 (integer division). */
static void draw_elipse(uint32_t *dst, int width, int height, uint32_t color)
{
	const int rx = width / 2;
	const int ry = height / 2;
	const int rx2 = rx * rx;
	const int ry2 = ry * ry;
	const int bound = rx2 * ry2;
	int y, x;

	for (y = -ry; y < ry; y++) {
		for (x = -rx; x < rx; x++) {
			/* (x/rx)^2 + (y/ry)^2 <= 1, scaled to integers */
			int inside = y * y * rx2 + x * x * ry2 <= bound;
			dst[(y + ry) * width + (x + rx)] = inside ? color : 0x00000000;
		}
	}
}
+
/* called when we should push a new buffer in the queue: dequeue a buffer,
 * fill in the metadata (header, damage, crop, cursor), draw the test
 * pattern and queue the buffer back to the stream */
static void on_process(void *userdata)
{
	struct data *data = userdata;
	struct pw_buffer *b;
	struct spa_buffer *buf;
	uint32_t i, j;
	uint8_t *p;
	struct spa_meta *m;
	struct spa_meta_header *h;
	struct spa_meta_region *mc;
	struct spa_meta_cursor *mcs;

	/* NOTE(review): "timeout" trace looks copy-pasted from the timer handler */
	pw_log_trace("timeout");

	if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
		pw_log_warn("out of buffers: %m");
		return;
	}

	buf = b->buffer;
	if ((p = buf->datas[0].data) == NULL)
		return;

	/* fill the header metadata if it was negotiated */
	if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
#if 0
		struct timespec now;
		clock_gettime(CLOCK_MONOTONIC, &now);
		h->pts = SPA_TIMESPEC_TO_NSEC(&now);
#else
		h->pts = -1;
#endif
		h->flags = 0;
		h->seq = data->seq++;
		h->dts_offset = 0;
	}
	/* mark the whole frame as damaged, terminated by an empty region */
	if ((m = spa_buffer_find_meta(buf, SPA_META_VideoDamage))) {
		struct spa_meta_region *r = spa_meta_first(m);

		if (spa_meta_check(r, m)) {
			r->region.position = SPA_POINT(0,0);
			r->region.size = data->format.size;
			r++;
		}
		if (spa_meta_check(r, m))
			r->region = SPA_REGION(0,0,0,0);
	}
	/* animate a crop rectangle that breathes in and out */
	if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc)))) {
		data->crop = (sin(data->accumulator) + 1.0) * 32.0;
		mc->region.position.x = data->crop;
		mc->region.position.y = data->crop;
		mc->region.size.width = data->format.size.width - data->crop*2;
		mc->region.size.height = data->format.size.height - data->crop*2;
	}
	/* move a cursor in a circle and redraw its ellipse bitmap */
	if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs)))) {
		struct spa_meta_bitmap *mb;
		uint32_t *bitmap, color;

		mcs->id = 1;
		mcs->position.x = (sin(data->accumulator) + 1.0) * 160.0 + 80;
		mcs->position.y = (cos(data->accumulator) + 1.0) * 100.0 + 50;
		mcs->hotspot.x = 0;
		mcs->hotspot.y = 0;
		mcs->bitmap_offset = sizeof(struct spa_meta_cursor);

		/* the bitmap follows the cursor struct in the same metadata area */
		mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
		mb->format = SPA_VIDEO_FORMAT_ARGB;
		mb->size.width = CURSOR_WIDTH;
		mb->size.height = CURSOR_HEIGHT;
		mb->stride = CURSOR_WIDTH * CURSOR_BPP;
		mb->offset = sizeof(struct spa_meta_bitmap);

		bitmap = SPA_PTROFF(mb, mb->offset, uint32_t);
		color = (cos(data->accumulator) + 1.0) * (1 << 23);
		color |= 0xff000000;

		draw_elipse(bitmap, mb->size.width, mb->size.height, color);
	}

	/* draw the moving test pattern, one row at a time */
	for (i = 0; i < data->format.size.height; i++) {
		for (j = 0; j < data->format.size.width * BPP; j++) {
			p[j] = data->counter + j * i;
		}
		p += data->stride;
		data->counter += 13;
	}

	/* advance and wrap the animation phase */
	data->accumulator += M_PI_M2 / 50.0;
	if (data->accumulator >= M_PI_M2)
		data->accumulator -= M_PI_M2;

	/* describe the valid data in the buffer */
	buf->datas[0].chunk->offset = 0;
	buf->datas[0].chunk->size = data->format.size.height * data->stride;
	buf->datas[0].chunk->stride = data->stride;

	pw_stream_queue_buffer(data->stream, b);
}
+
+/* called on timeout and we should start the graph */
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+ struct data *data = userdata;
+ pw_log_trace("timeout");
+ pw_stream_trigger_process(data->stream);
+}
+
/* when the stream is STREAMING, start the timer at 40ms intervals
 * to produce and push a frame. In other states we PAUSE the timer. */
static void on_stream_state_changed(void *_data, enum pw_stream_state old, enum pw_stream_state state,
				    const char *error)
{
	struct data *data = _data;

	printf("stream state: \"%s\"\n", pw_stream_state_as_string(state));

	switch (state) {
	case PW_STREAM_STATE_PAUSED:
		printf("node id: %d\n", pw_stream_get_node_id(data->stream));
		/* NULL timeout/interval disarms both timers */
		pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
				data->timer, NULL, NULL, false);
		pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
				data->reneg_timer, NULL, NULL, false);
		break;
	case PW_STREAM_STATE_STREAMING:
	{
		struct timespec timeout, interval;

		/* 1ns initial timeout = fire (almost) immediately,
		 * then every 40ms (25 fps) */
		timeout.tv_sec = 0;
		timeout.tv_nsec = 1;
		interval.tv_sec = 0;
		interval.tv_nsec = 40 * SPA_NSEC_PER_MSEC;

		/* only drive the graph from our timer when we are the driver */
		if (pw_stream_is_driving(data->stream))
			pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
					data->timer, &timeout, &interval, false);

		/* renegotiate the format once per second */
		timeout.tv_sec = 1;
		timeout.tv_nsec = 0;
		interval.tv_sec = 1;
		interval.tv_nsec = 0;

		pw_loop_update_timer(pw_thread_loop_get_loop(data->loop),
				data->reneg_timer, &timeout, &interval, false);
		break;
	}
	default:
		break;
	}
}
+
/* we set the PW_STREAM_FLAG_ALLOC_BUFFERS flag when connecting so we need
 * to provide buffer memory: back each buffer with an mmap'd, sealed memfd */
static void on_stream_add_buffer(void *_data, struct pw_buffer *buffer)
{
	struct data *data = _data;
	struct spa_buffer *buf = buffer->buffer;
	struct spa_data *d;
#ifdef HAVE_MEMFD_CREATE
	unsigned int seals;
#endif

	pw_log_info("add buffer %p", buffer);
	d = buf->datas;

	/* before we fill it in, d[0].type is a bitmask of the data types the
	 * server can accept; we require MemFd support */
	if ((d[0].type & (1<<SPA_DATA_MemFd)) == 0) {
		pw_log_error("unsupported data type %08x", d[0].type);
		return;
	}

	/* create the memfd on the buffer, set the type and flags */
	d[0].type = SPA_DATA_MemFd;
	d[0].flags = SPA_DATA_FLAG_READWRITE;
#ifdef HAVE_MEMFD_CREATE
	d[0].fd = memfd_create("video-src-memfd", MFD_CLOEXEC | MFD_ALLOW_SEALING);
#else
	d[0].fd = -1;
#endif
	if (d[0].fd == -1) {
		pw_log_error("can't create memfd: %m");
		return;
	}
	d[0].mapoffset = 0;
	d[0].maxsize = data->stride * data->format.size.height;

	/* truncate to the right size before we set seals */
	if (ftruncate(d[0].fd, d[0].maxsize) < 0) {
		pw_log_error("can't truncate to %d: %m", d[0].maxsize);
		/* NOTE(review): the memfd stays open on this error path; it is
		 * only reclaimed when on_stream_remove_buffer runs */
		return;
	}
#ifdef HAVE_MEMFD_CREATE
	/* not enforced yet but server might require SEAL_SHRINK later */
	seals = F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL;
	if (fcntl(d[0].fd, F_ADD_SEALS, seals) == -1) {
		pw_log_warn("Failed to add seals: %m");
	}
#endif

	/* now mmap so we can write to it in the process function above */
	d[0].data = mmap(NULL, d[0].maxsize, PROT_READ|PROT_WRITE,
			MAP_SHARED, d[0].fd, d[0].mapoffset);
	if (d[0].data == MAP_FAILED) {
		pw_log_error("can't mmap memory: %m");
		return;
	}
}
+
+/* close the memfd we set on the buffers here */
+static void on_stream_remove_buffer(void *_data, struct pw_buffer *buffer)
+{
+ struct spa_buffer *buf = buffer->buffer;
+ struct spa_data *d;
+
+ d = buf->datas;
+ pw_log_info("remove buffer %p", buffer);
+
+ munmap(d[0].data, d[0].maxsize);
+ close(d[0].fd);
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format param.
+ *
+ * We are now supposed to call pw_stream_update_params() with success or
+ * failure, depending on if we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_update_params() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
static void
on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
{
	struct data *data = _data;
	struct pw_stream *stream = data->stream;
	uint8_t params_buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
	const struct spa_pod *params[5];

	/* only interested in the negotiated format; NULL clears the param */
	if (param == NULL || id != SPA_PARAM_Format)
		return;

	pw_log_info("format changed");
	spa_format_video_raw_parse(param, &data->format);

	/* bytes per line: width * bytes-per-pixel, rounded up to a multiple of 4 */
	data->stride = SPA_ROUND_UP_N(data->format.size.width * BPP, 4);

	/* buffer layout: 2..MAX_BUFFERS memfd buffers (8 preferred), one block */
	params[0] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
		SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
		SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
		SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->format.size.height),
		SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride),
		SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(1<<SPA_DATA_MemFd));

	/* header metadata (pts/seq/flags), filled in by on_process */
	params[1] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));

	/* video damage regions: room for 1..16 rectangles */
	params[2] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
		SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
					sizeof(struct spa_meta_region) * 16,
					sizeof(struct spa_meta_region) * 1,
					sizeof(struct spa_meta_region) * 16));
	/* a single crop rectangle */
	params[3] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
#define CURSOR_META_SIZE(w,h)	(sizeof(struct spa_meta_cursor) + \
				 sizeof(struct spa_meta_bitmap) + w * h * CURSOR_BPP)
	/* cursor metadata including an ARGB bitmap */
	params[4] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
		SPA_PARAM_META_size, SPA_POD_Int(
					CURSOR_META_SIZE(CURSOR_WIDTH,CURSOR_HEIGHT)));

	pw_stream_update_params(stream, params, 5);
}
+
/* stream callbacks: produce frames from a timer (process), manage our own
 * buffer memory (add/remove_buffer), negotiate buffers/metadata in
 * param_changed and start/stop the timers on state changes */
static const struct pw_stream_events stream_events = {
	PW_VERSION_STREAM_EVENTS,
	.process = on_process,
	.state_changed = on_stream_state_changed,
	.param_changed = on_stream_param_changed,
	.add_buffer = on_stream_add_buffer,
	.remove_buffer = on_stream_remove_buffer,
};
+
+static void on_reneg_timeout(void *userdata, uint64_t expirations)
+{
+ struct data *data = userdata;
+ uint8_t buffer[1024];
+ struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+ const struct spa_pod *params[2];
+ int32_t width, height;
+
+ width = data->cycle & 1 ? 320 : 640;
+ height = data->cycle & 1 ? 240 : 480;
+
+ fprintf(stderr, "renegotiate to %dx%d:\n", width, height);
+ params[0] = spa_pod_builder_add_object(&b,
+ SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+ SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
+ SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+ SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_RGB),
+ SPA_FORMAT_VIDEO_size, SPA_POD_Rectangle(&SPA_RECTANGLE(width, height)),
+ SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&SPA_FRACTION(25, 1)));
+
+ pw_stream_update_params(data->stream, params, 1);
+
+ data->cycle++;
+}
+
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_thread_loop_signal(data->loop, false);
+}
+
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[1];
	uint8_t buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	pw_init(&argc, &argv);

	/* create a thread loop and start it */
	/* NOTE(review): the loop and stream names say "video-src-alloc" —
	 * presumably copy-pasted from video-src-alloc.c; "video-src-reneg"
	 * would match this example. Confirm before renaming. */
	data.loop = pw_thread_loop_new("video-src-alloc", NULL);

	/* take the lock around all PipeWire functions. In callbacks, the lock
	 * is already taken for you but it's ok to lock again because the lock is
	 * recursive */
	pw_thread_loop_lock(data.loop);

	/* install some handlers to exit nicely */
	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_thread_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* start after the signal handlers are set */
	pw_thread_loop_start(data.loop);

	/* create a simple stream, the simple stream manages the core
	 * object for you if you don't want to deal with them.
	 *
	 * We're making a new video provider. We need to set the media-class
	 * property.
	 *
	 * Pass your events and a user_data pointer as the last arguments. This
	 * will inform you about the stream state. The most important event
	 * you need to listen to is the process event where you need to provide
	 * the data.
	 */
	data.stream = pw_stream_new_simple(
			pw_thread_loop_get_loop(data.loop),
			"video-src-alloc",
			pw_properties_new(
				PW_KEY_MEDIA_CLASS, "Video/Source",
				NULL),
			&stream_events,
			&data);

	/* make a timer to schedule our frames */
	data.timer = pw_loop_add_timer(pw_thread_loop_get_loop(data.loop),
			on_timeout, &data);

	/* make a timer to schedule renegotiation */
	data.reneg_timer = pw_loop_add_timer(pw_thread_loop_get_loop(data.loop),
			on_reneg_timeout, &data);

	/* build the extra parameter for the connection. Here we make an
	 * EnumFormat parameter which lists the possible formats we can provide.
	 * The server will select a format that matches and informs us about this
	 * in the stream param_changed event.
	 */
	params[0] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
		SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
		SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
		SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_RGB),
		SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
						&SPA_RECTANGLE(320, 240),
						&SPA_RECTANGLE(1, 1),
						&SPA_RECTANGLE(4096, 4096)),
		SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&SPA_FRACTION(25, 1)));

	/* now connect the stream, we need a direction (input/output),
	 * an optional target node to connect to, some flags and parameters.
	 *
	 * Here we pass PW_STREAM_FLAG_ALLOC_BUFFERS. We should in the
	 * add_buffer callback configure the buffer memory. This should be
	 * fd backed memory (memfd, dma-buf, ...) that can be shared with
	 * the server. */
	pw_stream_connect(data.stream,
			  PW_DIRECTION_OUTPUT,
			  PW_ID_ANY,
			  PW_STREAM_FLAG_DRIVER |
			  PW_STREAM_FLAG_ALLOC_BUFFERS,
			  params, 1);

	/* block here until do_quit signals; pw_thread_loop_wait releases the
	 * lock while waiting and re-acquires it before returning */
	pw_thread_loop_wait(data.loop);

	/* unlock before stop */
	pw_thread_loop_unlock(data.loop);
	pw_thread_loop_stop(data.loop);

	pw_stream_destroy(data.stream);

	/* destroy after dependent objects are destroyed */
	pw_thread_loop_destroy(data.loop);
	pw_deinit();

	return 0;
}
diff --git a/src/examples/video-src.c b/src/examples/video-src.c
new file mode 100644
index 0000000..d7e2051
--- /dev/null
+++ b/src/examples/video-src.c
@@ -0,0 +1,357 @@
+/* PipeWire
+ *
+ * Copyright © 2018 Wim Taymans
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ [title]
+ Video source using \ref pw_stream.
+ [title]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <signal.h>
+#include <math.h>
+
+#include <spa/param/video/format-utils.h>
+
+#include <pipewire/pipewire.h>
+
+#define BPP 3
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define CURSOR_BPP 4
+
+#define MAX_BUFFERS 64
+
+#define M_PI_M2 ( M_PI + M_PI )
+
/* per-instance state shared between main() and the stream callbacks */
struct data {
	struct pw_main_loop *loop;		/* main loop running in this thread */
	struct spa_source *timer;		/* 40ms frame-production timer */

	struct pw_context *context;		/* explicit context (no simple stream here) */
	struct pw_core *core;			/* connection to the PipeWire daemon */

	struct pw_stream *stream;
	struct spa_hook stream_listener;

	struct spa_video_info_raw format;	/* currently negotiated format */
	int32_t stride;				/* bytes per line (width*BPP rounded up to 4) */

	int counter;				/* animation state for the test pattern */
	uint32_t seq;				/* sequence number for SPA_META_Header */

	double crop;				/* animated crop written to SPA_META_VideoCrop */
	double accumulator;			/* phase accumulator for the animations */
	int res;				/* process exit code */
};
+
+static void draw_elipse(uint32_t *dst, int width, int height, uint32_t color)
+{
+ int i, j, r1, r2, r12, r22, r122;
+
+ r1 = width/2;
+ r12 = r1 * r1;
+ r2 = height/2;
+ r22 = r2 * r2;
+ r122 = r12 * r22;
+
+ for (i = -r2; i < r2; i++) {
+ for (j = -r1; j < r1; j++) {
+ dst[(i + r2)*width+(j+r1)] =
+ (i * i * r12 + j * j * r22 <= r122) ? color : 0x00000000;
+ }
+ }
+}
+
/* produce one frame: dequeue a buffer, fill in the negotiated metadata
 * (header, damage, crop, cursor), draw the test pattern and queue the
 * buffer back to the stream */
static void on_process(void *userdata)
{
	struct data *data = userdata;
	struct pw_buffer *b;
	struct spa_buffer *buf;
	uint32_t i, j;
	uint8_t *p;
	struct spa_meta *m;
	struct spa_meta_header *h;
	struct spa_meta_region *mc;
	struct spa_meta_cursor *mcs;

	if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
		pw_log_warn("out of buffers: %m");
		return;
	}

	buf = b->buffer;
	if ((p = buf->datas[0].data) == NULL)
		return;

	/* fill the header metadata if it was negotiated */
	if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
#if 0
		struct timespec now;
		clock_gettime(CLOCK_MONOTONIC, &now);
		h->pts = SPA_TIMESPEC_TO_NSEC(&now);
#else
		h->pts = -1;
#endif
		h->flags = 0;
		h->seq = data->seq++;
		h->dts_offset = 0;
	}
	/* mark the whole frame as damaged, terminated by an empty region */
	if ((m = spa_buffer_find_meta(buf, SPA_META_VideoDamage))) {
		struct spa_meta_region *r = spa_meta_first(m);

		if (spa_meta_check(r, m)) {
			r->region.position = SPA_POINT(0,0);
			r->region.size = data->format.size;
			r++;
		}
		if (spa_meta_check(r, m))
			r->region = SPA_REGION(0,0,0,0);
	}
	/* animate a crop rectangle that breathes in and out */
	if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc)))) {
		data->crop = (sin(data->accumulator) + 1.0) * 32.0;
		mc->region.position.x = data->crop;
		mc->region.position.y = data->crop;
		mc->region.size.width = data->format.size.width - data->crop*2;
		mc->region.size.height = data->format.size.height - data->crop*2;
	}
	/* move a cursor in a circle and redraw its ellipse bitmap */
	if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs)))) {
		struct spa_meta_bitmap *mb;
		uint32_t *bitmap, color;

		mcs->id = 1;
		mcs->position.x = (sin(data->accumulator) + 1.0) * 160.0 + 80;
		mcs->position.y = (cos(data->accumulator) + 1.0) * 100.0 + 50;
		mcs->hotspot.x = 0;
		mcs->hotspot.y = 0;
		mcs->bitmap_offset = sizeof(struct spa_meta_cursor);

		/* the bitmap follows the cursor struct in the same metadata area */
		mb = SPA_PTROFF(mcs, mcs->bitmap_offset, struct spa_meta_bitmap);
		mb->format = SPA_VIDEO_FORMAT_ARGB;
		mb->size.width = CURSOR_WIDTH;
		mb->size.height = CURSOR_HEIGHT;
		mb->stride = CURSOR_WIDTH * CURSOR_BPP;
		mb->offset = sizeof(struct spa_meta_bitmap);

		bitmap = SPA_PTROFF(mb, mb->offset, uint32_t);
		color = (cos(data->accumulator) + 1.0) * (1 << 23);
		color |= 0xff000000;

		draw_elipse(bitmap, mb->size.width, mb->size.height, color);
	}

	/* draw the moving test pattern, one row at a time */
	for (i = 0; i < data->format.size.height; i++) {
		for (j = 0; j < data->format.size.width * BPP; j++) {
			p[j] = data->counter + j * i;
		}
		p += data->stride;
		data->counter += 13;
	}

	/* advance and wrap the animation phase */
	data->accumulator += M_PI_M2 / 50.0;
	if (data->accumulator >= M_PI_M2)
		data->accumulator -= M_PI_M2;

	/* describe the valid data in the buffer */
	buf->datas[0].chunk->offset = 0;
	buf->datas[0].chunk->size = data->format.size.height * data->stride;
	buf->datas[0].chunk->stride = data->stride;

	pw_stream_queue_buffer(data->stream, b);
}
+
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+ struct data *data = userdata;
+ pw_log_trace("timeout");
+ pw_stream_trigger_process(data->stream);
+}
+
/* start the 40ms frame timer when STREAMING, disarm it when PAUSED, and
 * quit the main loop on error or disconnect */
static void on_stream_state_changed(void *_data, enum pw_stream_state old, enum pw_stream_state state,
				    const char *error)
{
	struct data *data = _data;

	printf("stream state: \"%s\"\n", pw_stream_state_as_string(state));

	switch (state) {
	case PW_STREAM_STATE_ERROR:
	case PW_STREAM_STATE_UNCONNECTED:
		pw_main_loop_quit(data->loop);
		break;

	case PW_STREAM_STATE_PAUSED:
		printf("node id: %d\n", pw_stream_get_node_id(data->stream));
		/* NULL timeout/interval disarms the timer */
		pw_loop_update_timer(pw_main_loop_get_loop(data->loop),
				data->timer, NULL, NULL, false);
		break;
	case PW_STREAM_STATE_STREAMING:
	{
		struct timespec timeout, interval;

		/* 1ns initial timeout = fire (almost) immediately,
		 * then every 40ms (25 fps) */
		timeout.tv_sec = 0;
		timeout.tv_nsec = 1;
		interval.tv_sec = 0;
		interval.tv_nsec = 40 * SPA_NSEC_PER_MSEC;

		pw_loop_update_timer(pw_main_loop_get_loop(data->loop),
				data->timer, &timeout, &interval, false);
		break;
	}
	default:
		break;
	}
}
+
/* the format was negotiated: compute the stride and announce the buffer
 * layout and the metadata we want on our buffers. With
 * PW_STREAM_FLAG_MAP_BUFFERS the server allocates and maps the memory
 * for us, so no dataType is requested here. */
static void
on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
{
	struct data *data = _data;
	struct pw_stream *stream = data->stream;
	uint8_t params_buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
	const struct spa_pod *params[5];

	/* only interested in the negotiated format; NULL clears the param */
	if (param == NULL || id != SPA_PARAM_Format)
		return;

	spa_format_video_raw_parse(param, &data->format);

	/* bytes per line: width * bytes-per-pixel, rounded up to a multiple of 4 */
	data->stride = SPA_ROUND_UP_N(data->format.size.width * BPP, 4);

	/* buffer layout: 2..MAX_BUFFERS buffers (8 preferred), one block */
	params[0] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
		SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
		SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
		SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->format.size.height),
		SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride));

	/* header metadata (pts/seq/flags), filled in by on_process */
	params[1] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));

	/* video damage regions: room for 1..16 rectangles */
	params[2] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
		SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
					sizeof(struct spa_meta_region) * 16,
					sizeof(struct spa_meta_region) * 1,
					sizeof(struct spa_meta_region) * 16));
	/* a single crop rectangle */
	params[3] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
		SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
#define CURSOR_META_SIZE(w,h)	(sizeof(struct spa_meta_cursor) + \
				 sizeof(struct spa_meta_bitmap) + w * h * CURSOR_BPP)
	/* cursor metadata including an ARGB bitmap */
	params[4] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
		SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
		SPA_PARAM_META_size, SPA_POD_Int(
					CURSOR_META_SIZE(CURSOR_WIDTH,CURSOR_HEIGHT)));

	pw_stream_update_params(stream, params, 5);
}
+
/* completion callback for pw_stream_trigger_process(); nothing to do */
static void on_trigger_done(void *_data)
{
	pw_log_trace("trigger done");
}
+
/* stream callbacks: produce frames from a timer (process), negotiate
 * buffers/metadata in param_changed, and manage the timer in
 * state_changed. Buffer memory is server-allocated (MAP_BUFFERS), so no
 * add/remove_buffer handlers are needed here. */
static const struct pw_stream_events stream_events = {
	PW_VERSION_STREAM_EVENTS,
	.process = on_process,
	.state_changed = on_stream_state_changed,
	.param_changed = on_stream_param_changed,
	.trigger_done = on_trigger_done,
};
+
+static void do_quit(void *userdata, int signal_number)
+{
+ struct data *data = userdata;
+ pw_main_loop_quit(data->loop);
+}
+
int main(int argc, char *argv[])
{
	struct data data = { 0, };
	const struct spa_pod *params[1];
	uint8_t buffer[1024];
	struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));

	pw_init(&argc, &argv);

	/* main loop plus exit handlers for SIGINT/SIGTERM */
	data.loop = pw_main_loop_new(NULL);

	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
	pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);

	/* unlike the other examples this one creates the context and core
	 * explicitly instead of using pw_stream_new_simple */
	data.context = pw_context_new(pw_main_loop_get_loop(data.loop), NULL, 0);

	/* timer that will drive frame production while STREAMING */
	data.timer = pw_loop_add_timer(pw_main_loop_get_loop(data.loop), on_timeout, &data);

	data.core = pw_context_connect(data.context, NULL, 0);
	if (data.core == NULL) {
		fprintf(stderr, "can't connect: %m\n");
		data.res = -errno;
		goto cleanup;
	}

	data.stream = pw_stream_new(data.core, "video-src",
		pw_properties_new(
			PW_KEY_MEDIA_CLASS, "Video/Source",
			NULL));

	/* EnumFormat: RGB at 25fps, any size from 1x1 to 4096x4096
	 * (320x240 preferred); the server picks a match and reports it
	 * via param_changed */
	params[0] = spa_pod_builder_add_object(&b,
		SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
		SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
		SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
		SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_RGB),
		SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
						&SPA_RECTANGLE(320, 240),
						&SPA_RECTANGLE(1, 1),
						&SPA_RECTANGLE(4096, 4096)),
		SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&SPA_FRACTION(25, 1)));

	pw_stream_add_listener(data.stream,
			       &data.stream_listener,
			       &stream_events,
			       &data);

	/* MAP_BUFFERS: the server allocates the buffer memory and maps it
	 * into our address space before the process callback runs */
	pw_stream_connect(data.stream,
			  PW_DIRECTION_OUTPUT,
			  PW_ID_ANY,
			  PW_STREAM_FLAG_DRIVER |
			  PW_STREAM_FLAG_MAP_BUFFERS,
			  params, 1);

	pw_main_loop_run(data.loop);

cleanup:
	/* destroying the context also destroys the core and the stream */
	pw_context_destroy(data.context);
	pw_main_loop_destroy(data.loop);
	pw_deinit();

	return data.res;
}