diff options
Diffstat (limited to 'audio')
32 files changed, 1248 insertions, 431 deletions
diff --git a/audio/chmap.c b/audio/chmap.c index e2b95f4..a56d78d 100644 --- a/audio/chmap.c +++ b/audio/chmap.c @@ -239,8 +239,8 @@ void mp_chmap_set_unknown(struct mp_chmap *dst, int num_channels) } } -// Return the ffmpeg/libav channel layout as in <libavutil/channel_layout.h>. -// Speakers not representable by ffmpeg/libav are dropped. +// Return the ffmpeg channel layout as in <libavutil/channel_layout.h>. +// Speakers not representable by ffmpeg are dropped. // Warning: this ignores the order of the channels, and will return a channel // mask even if the order is different from libavcodec's. // Also, "unknown" channel maps are translated to non-sense channel @@ -263,7 +263,7 @@ uint64_t mp_chmap_to_lavc_unchecked(const struct mp_chmap *src) return mask; } -// Return the ffmpeg/libav channel layout as in <libavutil/channel_layout.h>. +// Return the ffmpeg channel layout as in <libavutil/channel_layout.h>. // Returns 0 if the channel order doesn't match lavc's or if it's invalid. uint64_t mp_chmap_to_lavc(const struct mp_chmap *src) { @@ -272,7 +272,7 @@ uint64_t mp_chmap_to_lavc(const struct mp_chmap *src) return mp_chmap_to_lavc_unchecked(src); } -// Set channel map from the ffmpeg/libav channel layout as in +// Set channel map from the ffmpeg channel layout as in // <libavutil/channel_layout.h>. // If the number of channels exceed MP_NUM_CHANNELS, set dst to empty. 
void mp_chmap_from_lavc(struct mp_chmap *dst, uint64_t src) diff --git a/audio/decode/ad_lavc.c b/audio/decode/ad_lavc.c index 08b789a..9b5f1fb 100644 --- a/audio/decode/ad_lavc.c +++ b/audio/decode/ad_lavc.c @@ -44,6 +44,7 @@ #include "options/options.h" struct priv { + struct mp_codec_params *codec; AVCodecContext *avctx; AVFrame *avframe; AVPacket *avpkt; @@ -156,7 +157,7 @@ static bool init(struct mp_filter *da, struct mp_codec_params *codec, return true; } -static void destroy(struct mp_filter *da) +static void ad_lavc_destroy(struct mp_filter *da) { struct priv *ctx = da->priv; @@ -165,7 +166,7 @@ static void destroy(struct mp_filter *da) mp_free_av_packet(&ctx->avpkt); } -static void reset(struct mp_filter *da) +static void ad_lavc_reset(struct mp_filter *da) { struct priv *ctx = da->priv; @@ -219,6 +220,8 @@ static int receive_frame(struct mp_filter *da, struct mp_frame *out) if (!priv->avframe->buf[0]) return ret; + mp_codec_info_from_av(avctx, priv->codec); + double out_pts = mp_pts_from_av(priv->avframe->pts, &priv->codec_timebase); struct mp_aframe *mpframe = mp_aframe_from_avframe(priv->avframe); @@ -276,7 +279,7 @@ static int receive_frame(struct mp_filter *da, struct mp_frame *out) return ret; } -static void process(struct mp_filter *ad) +static void ad_lavc_process(struct mp_filter *ad) { struct priv *priv = ad->priv; @@ -286,9 +289,9 @@ static void process(struct mp_filter *ad) static const struct mp_filter_info ad_lavc_filter = { .name = "ad_lavc", .priv_size = sizeof(struct priv), - .process = process, - .reset = reset, - .destroy = destroy, + .process = ad_lavc_process, + .reset = ad_lavc_reset, + .destroy = ad_lavc_destroy, }; static struct mp_decoder *create(struct mp_filter *parent, @@ -305,12 +308,16 @@ static struct mp_decoder *create(struct mp_filter *parent, da->log = mp_log_new(da, parent->log, NULL); struct priv *priv = da->priv; + priv->codec = codec; priv->public.f = da; if (!init(da, codec, decoder)) { talloc_free(da); return NULL; } 
+ + codec->codec_desc = priv->avctx->codec_descriptor->long_name; + return &priv->public; } diff --git a/audio/decode/ad_spdif.c b/audio/decode/ad_spdif.c index 393af8a..98a53f3 100644 --- a/audio/decode/ad_spdif.c +++ b/audio/decode/ad_spdif.c @@ -37,6 +37,12 @@ #define OUTBUF_SIZE 65536 +#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 26, 100) +#define AV_PROFILE_UNKNOWN FF_PROFILE_UNKNOWN +#define AV_PROFILE_DTS_HD_HRA FF_PROFILE_DTS_HD_HRA +#define AV_PROFILE_DTS_HD_MA FF_PROFILE_DTS_HD_MA +#endif + struct spdifContext { struct mp_log *log; enum AVCodecID codec_id; @@ -53,7 +59,11 @@ struct spdifContext { struct mp_decoder public; }; +#if LIBAVCODEC_VERSION_MAJOR < 61 static int write_packet(void *p, uint8_t *buf, int buf_size) +#else +static int write_packet(void *p, const uint8_t *buf, int buf_size) +#endif { struct spdifContext *ctx = p; @@ -69,7 +79,7 @@ static int write_packet(void *p, uint8_t *buf, int buf_size) } // (called on both filter destruction _and_ if lavf fails to init) -static void destroy(struct mp_filter *da) +static void ad_spdif_destroy(struct mp_filter *da) { struct spdifContext *spdif_ctx = da->priv; AVFormatContext *lavf_ctx = spdif_ctx->lavf_ctx; @@ -79,7 +89,7 @@ static void destroy(struct mp_filter *da) av_write_trailer(lavf_ctx); if (lavf_ctx->pb) av_freep(&lavf_ctx->pb->buffer); - av_freep(&lavf_ctx->pb); + avio_context_free(&lavf_ctx->pb); avformat_free_context(lavf_ctx); spdif_ctx->lavf_ctx = NULL; } @@ -90,7 +100,7 @@ static void determine_codec_params(struct mp_filter *da, AVPacket *pkt, int *out_profile, int *out_rate) { struct spdifContext *spdif_ctx = da->priv; - int profile = FF_PROFILE_UNKNOWN; + int profile = AV_PROFILE_UNKNOWN; AVCodecContext *ctx = NULL; AVFrame *frame = NULL; @@ -115,7 +125,7 @@ static void determine_codec_params(struct mp_filter *da, AVPacket *pkt, av_parser_close(parser); } - if (profile != FF_PROFILE_UNKNOWN || spdif_ctx->codec_id != AV_CODEC_ID_DTS) + if (profile != AV_PROFILE_UNKNOWN || 
spdif_ctx->codec_id != AV_CODEC_ID_DTS) return; const AVCodec *codec = avcodec_find_decoder(spdif_ctx->codec_id); @@ -145,7 +155,7 @@ done: av_frame_free(&frame); avcodec_free_context(&ctx); - if (profile == FF_PROFILE_UNKNOWN) + if (profile == AV_PROFILE_UNKNOWN) MP_WARN(da, "Failed to parse codec profile.\n"); } @@ -155,7 +165,7 @@ static int init_filter(struct mp_filter *da) AVPacket *pkt = spdif_ctx->avpkt; - int profile = FF_PROFILE_UNKNOWN; + int profile = AV_PROFILE_UNKNOWN; int c_rate = 0; determine_codec_params(da, pkt, &profile, &c_rate); MP_VERBOSE(da, "In: profile=%d samplerate=%d\n", profile, c_rate); @@ -186,7 +196,8 @@ static int init_filter(struct mp_filter *da) if (!stream) goto fail; - stream->codecpar->codec_id = spdif_ctx->codec_id; + stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; + stream->codecpar->codec_id = spdif_ctx->codec_id; AVDictionary *format_opts = NULL; @@ -208,15 +219,15 @@ static int init_filter(struct mp_filter *da) num_channels = 2; break; case AV_CODEC_ID_DTS: { - bool is_hd = profile == FF_PROFILE_DTS_HD_HRA || - profile == FF_PROFILE_DTS_HD_MA || - profile == FF_PROFILE_UNKNOWN; + bool is_hd = profile == AV_PROFILE_DTS_HD_HRA || + profile == AV_PROFILE_DTS_HD_MA || + profile == AV_PROFILE_UNKNOWN; // Apparently, DTS-HD over SPDIF is specified to be 7.1 (8 channels) // for DTS-HD MA, and stereo (2 channels) for DTS-HD HRA. The bit // streaming rate as well as the signaled channel count are defined // based on this value. - int dts_hd_spdif_channel_count = profile == FF_PROFILE_DTS_HD_HRA ? + int dts_hd_spdif_channel_count = profile == AV_PROFILE_DTS_HD_HRA ? 2 : 8; if (spdif_ctx->use_dts_hd && is_hd) { av_dict_set_int(&format_opts, "dtshd_rate", @@ -226,7 +237,7 @@ static int init_filter(struct mp_filter *da) num_channels = dts_hd_spdif_channel_count; } else { sample_format = AF_FORMAT_S_DTS; - samplerate = 48000; + samplerate = c_rate > 44100 ? 
48000 : 44100; num_channels = 2; } break; @@ -250,6 +261,8 @@ static int init_filter(struct mp_filter *da) abort(); } + stream->codecpar->sample_rate = samplerate; + struct mp_chmap chmap; mp_chmap_from_channels(&chmap, num_channels); mp_aframe_set_chmap(spdif_ctx->fmt, &chmap); @@ -270,12 +283,12 @@ static int init_filter(struct mp_filter *da) return 0; fail: - destroy(da); + ad_spdif_destroy(da); mp_filter_internal_mark_failed(da); return -1; } -static void process(struct mp_filter *da) +static void ad_spdif_process(struct mp_filter *da) { struct spdifContext *spdif_ctx = da->priv; @@ -400,8 +413,8 @@ struct mp_decoder_list *select_spdif_codec(const char *codec, const char *pref) static const struct mp_filter_info ad_spdif_filter = { .name = "ad_spdif", .priv_size = sizeof(struct spdifContext), - .process = process, - .destroy = destroy, + .process = ad_spdif_process, + .destroy = ad_spdif_destroy, }; static struct mp_decoder *create(struct mp_filter *parent, diff --git a/audio/filter/af_drop.c b/audio/filter/af_drop.c index 724c482..499389d 100644 --- a/audio/filter/af_drop.c +++ b/audio/filter/af_drop.c @@ -11,7 +11,7 @@ struct priv { struct mp_aframe *last; // for repeating }; -static void process(struct mp_filter *f) +static void af_drop_process(struct mp_filter *f) { struct priv *p = f->priv; @@ -52,7 +52,7 @@ static void process(struct mp_filter *f) mp_pin_in_write(f->ppins[1], frame); } -static bool command(struct mp_filter *f, struct mp_filter_command *cmd) +static bool af_drop_command(struct mp_filter *f, struct mp_filter_command *cmd) { struct priv *p = f->priv; @@ -65,7 +65,7 @@ static bool command(struct mp_filter *f, struct mp_filter_command *cmd) return false; } -static void reset(struct mp_filter *f) +static void af_drop_reset(struct mp_filter *f) { struct priv *p = f->priv; @@ -73,18 +73,18 @@ static void reset(struct mp_filter *f) p->diff = 0; } -static void destroy(struct mp_filter *f) +static void af_drop_destroy(struct mp_filter *f) { - 
reset(f); + af_drop_reset(f); } static const struct mp_filter_info af_drop_filter = { .name = "drop", .priv_size = sizeof(struct priv), - .process = process, - .command = command, - .reset = reset, - .destroy = destroy, + .process = af_drop_process, + .command = af_drop_command, + .reset = af_drop_reset, + .destroy = af_drop_destroy, }; static struct mp_filter *af_drop_create(struct mp_filter *parent, void *options) diff --git a/audio/filter/af_format.c b/audio/filter/af_format.c index 2d1c1cc..eddce64 100644 --- a/audio/filter/af_format.c +++ b/audio/filter/af_format.c @@ -38,7 +38,7 @@ struct priv { struct mp_pin *in_pin; }; -static void process(struct mp_filter *f) +static void af_format_process(struct mp_filter *f) { struct priv *p = f->priv; @@ -85,7 +85,7 @@ error: static const struct mp_filter_info af_format_filter = { .name = "format", .priv_size = sizeof(struct priv), - .process = process, + .process = af_format_process, }; static struct mp_filter *af_format_create(struct mp_filter *parent, diff --git a/audio/filter/af_lavcac3enc.c b/audio/filter/af_lavcac3enc.c index b4a1d59..def9700 100644 --- a/audio/filter/af_lavcac3enc.c +++ b/audio/filter/af_lavcac3enc.c @@ -50,7 +50,7 @@ #define AC3_MAX_CHANNELS 6 #define AC3_MAX_CODED_FRAME_SIZE 3840 #define AC3_FRAME_SIZE (6 * 256) -const static uint16_t ac3_bitrate_tab[19] = { +static const uint16_t ac3_bitrate_tab[19] = { 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640 }; @@ -103,7 +103,15 @@ static bool reinit(struct mp_filter *f) if (!bit_rate && chmap.num < AC3_MAX_CHANNELS + 1) bit_rate = default_bit_rate[chmap.num]; - avcodec_close(s->lavc_actx); + avcodec_free_context(&s->lavc_actx); + s->lavc_actx = avcodec_alloc_context3(s->lavc_acodec); + if (!s->lavc_actx) { + MP_ERR(f, "Audio LAVC, couldn't reallocate context!\n"); + return false; + } + + if (mp_set_avopts(f->log, s->lavc_actx, s->opts->avopts) < 0) + return false; // Put sample parameters 
s->lavc_actx->sample_fmt = af_to_avformat(format); @@ -131,18 +139,18 @@ static bool reinit(struct mp_filter *f) return true; } -static void reset(struct mp_filter *f) +static void af_lavcac3enc_reset(struct mp_filter *f) { struct priv *s = f->priv; TA_FREEP(&s->in_frame); } -static void destroy(struct mp_filter *f) +static void af_lavcac3enc_destroy(struct mp_filter *f) { struct priv *s = f->priv; - reset(f); + af_lavcac3enc_reset(f); av_packet_free(&s->lavc_pkt); avcodec_free_context(&s->lavc_actx); } @@ -153,7 +161,7 @@ static void swap_16(uint16_t *ptr, size_t size) ptr[n] = av_bswap16(ptr[n]); } -static void process(struct mp_filter *f) +static void af_lavcac3enc_process(struct mp_filter *f) { struct priv *s = f->priv; @@ -187,9 +195,6 @@ static void process(struct mp_filter *f) case MP_FRAME_AUDIO: TA_FREEP(&s->in_frame); s->in_frame = input.data; - frame = mp_frame_to_av(input, NULL); - if (!frame) - goto error; if (mp_aframe_get_channels(s->in_frame) < s->opts->min_channel_num) { // Just pass it through. 
s->in_frame = NULL; @@ -200,6 +205,9 @@ static void process(struct mp_filter *f) if (!reinit(f)) goto error; } + frame = mp_frame_to_av(input, NULL); + if (!frame) + goto error; break; default: goto error; // unexpected packet type } @@ -273,9 +281,9 @@ error: static const struct mp_filter_info af_lavcac3enc_filter = { .name = "lavcac3enc", .priv_size = sizeof(struct priv), - .process = process, - .reset = reset, - .destroy = destroy, + .process = af_lavcac3enc_process, + .reset = af_lavcac3enc_reset, + .destroy = af_lavcac3enc_destroy, }; static void add_chmaps_to_autoconv(struct mp_filter *f, diff --git a/audio/filter/af_rubberband.c b/audio/filter/af_rubberband.c index 48e5cc1..e71937f 100644 --- a/audio/filter/af_rubberband.c +++ b/audio/filter/af_rubberband.c @@ -105,7 +105,7 @@ static bool init_rubberband(struct mp_filter *f) return true; } -static void process(struct mp_filter *f) +static void af_rubberband_process(struct mp_filter *f) { struct priv *p = f->priv; @@ -233,7 +233,7 @@ error: mp_filter_internal_mark_failed(f); } -static bool command(struct mp_filter *f, struct mp_filter_command *cmd) +static bool af_rubberband_command(struct mp_filter *f, struct mp_filter_command *cmd) { struct priv *p = f->priv; @@ -263,7 +263,7 @@ static bool command(struct mp_filter *f, struct mp_filter_command *cmd) return false; } -static void reset(struct mp_filter *f) +static void af_rubberband_reset(struct mp_filter *f) { struct priv *p = f->priv; @@ -274,7 +274,7 @@ static void reset(struct mp_filter *f) TA_FREEP(&p->pending); } -static void destroy(struct mp_filter *f) +static void af_rubberband_destroy(struct mp_filter *f) { struct priv *p = f->priv; @@ -286,10 +286,10 @@ static void destroy(struct mp_filter *f) static const struct mp_filter_info af_rubberband_filter = { .name = "rubberband", .priv_size = sizeof(struct priv), - .process = process, - .command = command, - .reset = reset, - .destroy = destroy, + .process = af_rubberband_process, + .command = 
af_rubberband_command, + .reset = af_rubberband_reset, + .destroy = af_rubberband_destroy, }; static struct mp_filter *af_rubberband_create(struct mp_filter *parent, diff --git a/audio/filter/af_scaletempo.c b/audio/filter/af_scaletempo.c index f06478f..e7b101b 100644 --- a/audio/filter/af_scaletempo.c +++ b/audio/filter/af_scaletempo.c @@ -229,7 +229,7 @@ static void output_overlap_s16(struct priv *s, void *buf_out, } } -static void process(struct mp_filter *f) +static void af_scaletempo_process(struct mp_filter *f) { struct priv *s = f->priv; @@ -511,7 +511,7 @@ static bool reinit(struct mp_filter *f) return true; } -static bool command(struct mp_filter *f, struct mp_filter_command *cmd) +static bool af_scaletempo_command(struct mp_filter *f, struct mp_filter_command *cmd) { struct priv *s = f->priv; @@ -530,7 +530,7 @@ static bool command(struct mp_filter *f, struct mp_filter_command *cmd) return false; } -static void reset(struct mp_filter *f) +static void af_scaletempo_reset(struct mp_filter *f) { struct priv *s = f->priv; @@ -543,7 +543,7 @@ static void reset(struct mp_filter *f) TA_FREEP(&s->in); } -static void destroy(struct mp_filter *f) +static void af_scaletempo_destroy(struct mp_filter *f) { struct priv *s = f->priv; free(s->buf_queue); @@ -558,10 +558,10 @@ static void destroy(struct mp_filter *f) static const struct mp_filter_info af_scaletempo_filter = { .name = "scaletempo", .priv_size = sizeof(struct priv), - .process = process, - .command = command, - .reset = reset, - .destroy = destroy, + .process = af_scaletempo_process, + .command = af_scaletempo_command, + .reset = af_scaletempo_reset, + .destroy = af_scaletempo_destroy, }; static struct mp_filter *af_scaletempo_create(struct mp_filter *parent, diff --git a/audio/filter/af_scaletempo2.c b/audio/filter/af_scaletempo2.c index 7ad8e35..e43c29a 100644 --- a/audio/filter/af_scaletempo2.c +++ b/audio/filter/af_scaletempo2.c @@ -19,9 +19,9 @@ struct priv { }; static bool init_scaletempo2(struct 
mp_filter *f); -static void reset(struct mp_filter *f); +static void af_scaletempo2_reset(struct mp_filter *f); -static void process(struct mp_filter *f) +static void af_scaletempo2_process(struct mp_filter *f) { struct priv *p = f->priv; @@ -156,7 +156,7 @@ static bool init_scaletempo2(struct mp_filter *f) return true; } -static bool command(struct mp_filter *f, struct mp_filter_command *cmd) +static bool af_scaletempo2_command(struct mp_filter *f, struct mp_filter_command *cmd) { struct priv *p = f->priv; @@ -169,7 +169,7 @@ static bool command(struct mp_filter *f, struct mp_filter_command *cmd) return false; } -static void reset(struct mp_filter *f) +static void af_scaletempo2_reset(struct mp_filter *f) { struct priv *p = f->priv; mp_scaletempo2_reset(&p->data); @@ -177,7 +177,7 @@ static void reset(struct mp_filter *f) TA_FREEP(&p->pending); } -static void destroy(struct mp_filter *f) +static void af_scaletempo2_destroy(struct mp_filter *f) { struct priv *p = f->priv; mp_scaletempo2_destroy(&p->data); @@ -187,10 +187,10 @@ static void destroy(struct mp_filter *f) static const struct mp_filter_info af_scaletempo2_filter = { .name = "scaletempo2", .priv_size = sizeof(struct priv), - .process = process, - .command = command, - .reset = reset, - .destroy = destroy, + .process = af_scaletempo2_process, + .command = af_scaletempo2_command, + .reset = af_scaletempo2_reset, + .destroy = af_scaletempo2_destroy, }; static struct mp_filter *af_scaletempo2_create( diff --git a/audio/filter/af_scaletempo2_internals.c b/audio/filter/af_scaletempo2_internals.c index 534f4f6..924c091 100644 --- a/audio/filter/af_scaletempo2_internals.c +++ b/audio/filter/af_scaletempo2_internals.c @@ -93,15 +93,15 @@ static void multi_channel_moving_block_energies( } static float multi_channel_similarity_measure( - const float* dot_prod_a_b, - const float* energy_a, const float* energy_b, + const float* dot_prod, + const float* energy_target, const float* energy_candidate, int channels) { 
const float epsilon = 1e-12f; float similarity_measure = 0.0f; for (int n = 0; n < channels; ++n) { - similarity_measure += dot_prod_a_b[n] - / sqrtf(energy_a[n] * energy_b[n] + epsilon); + similarity_measure += dot_prod[n] * energy_target[n] + / sqrtf(energy_target[n] * energy_candidate[n] + epsilon); } return similarity_measure; } @@ -765,7 +765,8 @@ double mp_scaletempo2_get_latency(struct mp_scaletempo2 *p, double playback_rate bool mp_scaletempo2_frames_available(struct mp_scaletempo2 *p, double playback_rate) { - return p->input_buffer_final_frames > p->target_block_index + return (p->input_buffer_final_frames > p->target_block_index && + p->input_buffer_final_frames > 0) || can_perform_wsola(p, playback_rate) || p->num_complete_frames > 0; } diff --git a/audio/out/ao.c b/audio/out/ao.c index a5aa3a9..75fcbac 100644 --- a/audio/out/ao.c +++ b/audio/out/ao.c @@ -40,6 +40,7 @@ extern const struct ao_driver audio_out_audiotrack; extern const struct ao_driver audio_out_audiounit; extern const struct ao_driver audio_out_coreaudio; extern const struct ao_driver audio_out_coreaudio_exclusive; +extern const struct ao_driver audio_out_avfoundation; extern const struct ao_driver audio_out_rsound; extern const struct ao_driver audio_out_pipewire; extern const struct ao_driver audio_out_sndio; @@ -65,6 +66,9 @@ static const struct ao_driver * const audio_out_drivers[] = { #if HAVE_COREAUDIO &audio_out_coreaudio, #endif +#if HAVE_AVFOUNDATION + &audio_out_avfoundation, +#endif #if HAVE_PIPEWIRE &audio_out_pipewire, #endif @@ -612,7 +616,7 @@ void ao_set_gain(struct ao *ao, float gain) #define MUL_GAIN_f(d, num_samples, gain) \ for (int n = 0; n < (num_samples); n++) \ - (d)[n] = MPCLAMP(((d)[n]) * (gain), -1.0, 1.0) + (d)[n] = (d)[n] * (gain) static void process_plane(struct ao *ao, void *data, int num_samples) { diff --git a/audio/out/ao_alsa.c b/audio/out/ao_alsa.c index 75eda3b..92ea0db 100644 --- a/audio/out/ao_alsa.c +++ b/audio/out/ao_alsa.c @@ -623,7 +623,8 @@ 
static void uninit(struct ao *ao) CHECK_ALSA_ERROR("pcm close error"); } -alsa_error: ; +alsa_error: + snd_config_update_free_global(); } #define INIT_DEVICE_ERR_GENERIC -1 diff --git a/audio/out/ao_audiotrack.c b/audio/out/ao_audiotrack.c index 1392699..db1da9c 100644 --- a/audio/out/ao_audiotrack.c +++ b/audio/out/ao_audiotrack.c @@ -57,9 +57,6 @@ struct priv { bool cfg_pcm_float; int cfg_session_id; - bool needs_timestamp_offset; - int64_t timestamp_offset; - bool thread_terminate; bool thread_created; mp_thread thread; @@ -67,19 +64,19 @@ struct priv { mp_cond wakeup; }; -struct JNIByteBuffer { +static struct JNIByteBuffer { jclass clazz; jmethodID clear; - struct MPJniField mapping[]; -} ByteBuffer = {.mapping = { - #define OFFSET(member) offsetof(struct JNIByteBuffer, member) - {"java/nio/ByteBuffer", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, - {"java/nio/ByteBuffer", "clear", "()Ljava/nio/Buffer;", MP_JNI_METHOD, OFFSET(clear), 1}, +} ByteBuffer; +#define OFFSET(member) offsetof(struct JNIByteBuffer, member) +static const struct MPJniField ByteBuffer_mapping[] = { + {"java/nio/ByteBuffer", NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, + {"clear", "()Ljava/nio/Buffer;", MP_JNI_METHOD, OFFSET(clear), 1}, {0}, - #undef OFFSET -}}; +}; +#undef OFFSET -struct JNIAudioTrack { +static struct JNIAudioTrack { jclass clazz; jmethodID ctor; jmethodID ctorV21; @@ -110,78 +107,78 @@ struct JNIAudioTrack { jint ERROR_INVALID_OPERATION; jint WRITE_BLOCKING; jint WRITE_NON_BLOCKING; - struct MPJniField mapping[]; -} AudioTrack = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioTrack, member) - {"android/media/AudioTrack", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, - {"android/media/AudioTrack", "<init>", "(IIIIIII)V", MP_JNI_METHOD, OFFSET(ctor), 1}, - {"android/media/AudioTrack", "<init>", "(Landroid/media/AudioAttributes;Landroid/media/AudioFormat;III)V", MP_JNI_METHOD, OFFSET(ctorV21), 0}, - {"android/media/AudioTrack", "release", "()V", MP_JNI_METHOD, 
OFFSET(release), 1}, - {"android/media/AudioTrack", "getState", "()I", MP_JNI_METHOD, OFFSET(getState), 1}, - {"android/media/AudioTrack", "getPlayState", "()I", MP_JNI_METHOD, OFFSET(getPlayState), 1}, - {"android/media/AudioTrack", "play", "()V", MP_JNI_METHOD, OFFSET(play), 1}, - {"android/media/AudioTrack", "stop", "()V", MP_JNI_METHOD, OFFSET(stop), 1}, - {"android/media/AudioTrack", "flush", "()V", MP_JNI_METHOD, OFFSET(flush), 1}, - {"android/media/AudioTrack", "pause", "()V", MP_JNI_METHOD, OFFSET(pause), 1}, - {"android/media/AudioTrack", "write", "([BII)I", MP_JNI_METHOD, OFFSET(write), 1}, - {"android/media/AudioTrack", "write", "([FIII)I", MP_JNI_METHOD, OFFSET(writeFloat), 1}, - {"android/media/AudioTrack", "write", "([SIII)I", MP_JNI_METHOD, OFFSET(writeShortV23), 0}, - {"android/media/AudioTrack", "write", "(Ljava/nio/ByteBuffer;II)I", MP_JNI_METHOD, OFFSET(writeBufferV21), 1}, - {"android/media/AudioTrack", "getBufferSizeInFrames", "()I", MP_JNI_METHOD, OFFSET(getBufferSizeInFramesV23), 0}, - {"android/media/AudioTrack", "getTimestamp", "(Landroid/media/AudioTimestamp;)Z", MP_JNI_METHOD, OFFSET(getTimestamp), 1}, - {"android/media/AudioTrack", "getPlaybackHeadPosition", "()I", MP_JNI_METHOD, OFFSET(getPlaybackHeadPosition), 1}, - {"android/media/AudioTrack", "getLatency", "()I", MP_JNI_METHOD, OFFSET(getLatency), 1}, - {"android/media/AudioTrack", "getMinBufferSize", "(III)I", MP_JNI_STATIC_METHOD, OFFSET(getMinBufferSize), 1}, - {"android/media/AudioTrack", "getNativeOutputSampleRate", "(I)I", MP_JNI_STATIC_METHOD, OFFSET(getNativeOutputSampleRate), 1}, - {"android/media/AudioTrack", "WRITE_BLOCKING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(WRITE_BLOCKING), 0}, - {"android/media/AudioTrack", "WRITE_NON_BLOCKING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(WRITE_NON_BLOCKING), 0}, - {"android/media/AudioTrack", "STATE_INITIALIZED", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(STATE_INITIALIZED), 1}, - {"android/media/AudioTrack", "PLAYSTATE_STOPPED", "I", 
MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_STOPPED), 1}, - {"android/media/AudioTrack", "PLAYSTATE_PAUSED", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_PAUSED), 1}, - {"android/media/AudioTrack", "PLAYSTATE_PLAYING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_PLAYING), 1}, - {"android/media/AudioTrack", "MODE_STREAM", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(MODE_STREAM), 1}, - {"android/media/AudioTrack", "ERROR", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR), 1}, - {"android/media/AudioTrack", "ERROR_BAD_VALUE", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_BAD_VALUE), 1}, - {"android/media/AudioTrack", "ERROR_INVALID_OPERATION", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_INVALID_OPERATION), 1}, +} AudioTrack; +#define OFFSET(member) offsetof(struct JNIAudioTrack, member) +static const struct MPJniField AudioTrack_mapping[] = { + {"android/media/AudioTrack", NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, + {"<init>", "(IIIIIII)V", MP_JNI_METHOD, OFFSET(ctor), 1}, + {"<init>", "(Landroid/media/AudioAttributes;Landroid/media/AudioFormat;III)V", MP_JNI_METHOD, OFFSET(ctorV21), 0}, + {"release", "()V", MP_JNI_METHOD, OFFSET(release), 1}, + {"getState", "()I", MP_JNI_METHOD, OFFSET(getState), 1}, + {"getPlayState", "()I", MP_JNI_METHOD, OFFSET(getPlayState), 1}, + {"play", "()V", MP_JNI_METHOD, OFFSET(play), 1}, + {"stop", "()V", MP_JNI_METHOD, OFFSET(stop), 1}, + {"flush", "()V", MP_JNI_METHOD, OFFSET(flush), 1}, + {"pause", "()V", MP_JNI_METHOD, OFFSET(pause), 1}, + {"write", "([BII)I", MP_JNI_METHOD, OFFSET(write), 1}, + {"write", "([FIII)I", MP_JNI_METHOD, OFFSET(writeFloat), 1}, + {"write", "([SIII)I", MP_JNI_METHOD, OFFSET(writeShortV23), 0}, + {"write", "(Ljava/nio/ByteBuffer;II)I", MP_JNI_METHOD, OFFSET(writeBufferV21), 1}, + {"getBufferSizeInFrames", "()I", MP_JNI_METHOD, OFFSET(getBufferSizeInFramesV23), 0}, + {"getTimestamp", "(Landroid/media/AudioTimestamp;)Z", MP_JNI_METHOD, OFFSET(getTimestamp), 1}, + {"getPlaybackHeadPosition", "()I", 
MP_JNI_METHOD, OFFSET(getPlaybackHeadPosition), 1}, + {"getLatency", "()I", MP_JNI_METHOD, OFFSET(getLatency), 1}, + {"getMinBufferSize", "(III)I", MP_JNI_STATIC_METHOD, OFFSET(getMinBufferSize), 1}, + {"getNativeOutputSampleRate", "(I)I", MP_JNI_STATIC_METHOD, OFFSET(getNativeOutputSampleRate), 1}, + {"WRITE_BLOCKING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(WRITE_BLOCKING), 0}, + {"WRITE_NON_BLOCKING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(WRITE_NON_BLOCKING), 0}, + {"STATE_INITIALIZED", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(STATE_INITIALIZED), 1}, + {"PLAYSTATE_STOPPED", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_STOPPED), 1}, + {"PLAYSTATE_PAUSED", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_PAUSED), 1}, + {"PLAYSTATE_PLAYING", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(PLAYSTATE_PLAYING), 1}, + {"MODE_STREAM", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(MODE_STREAM), 1}, + {"ERROR", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR), 1}, + {"ERROR_BAD_VALUE", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_BAD_VALUE), 1}, + {"ERROR_INVALID_OPERATION", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_INVALID_OPERATION), 1}, {0} - #undef OFFSET -}}; +}; +#undef OFFSET -struct JNIAudioAttributes { +static struct JNIAudioAttributes { jclass clazz; jint CONTENT_TYPE_MOVIE; jint CONTENT_TYPE_MUSIC; jint USAGE_MEDIA; - struct MPJniField mapping[]; -} AudioAttributes = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioAttributes, member) - {"android/media/AudioAttributes", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, - {"android/media/AudioAttributes", "CONTENT_TYPE_MOVIE", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CONTENT_TYPE_MOVIE), 0}, - {"android/media/AudioAttributes", "CONTENT_TYPE_MUSIC", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CONTENT_TYPE_MUSIC), 0}, - {"android/media/AudioAttributes", "USAGE_MEDIA", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(USAGE_MEDIA), 0}, +} AudioAttributes; +#define OFFSET(member) offsetof(struct JNIAudioAttributes, member) 
+static const struct MPJniField AudioAttributes_mapping[] = { + {"android/media/AudioAttributes", NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, + {"CONTENT_TYPE_MOVIE", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CONTENT_TYPE_MOVIE), 0}, + {"CONTENT_TYPE_MUSIC", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CONTENT_TYPE_MUSIC), 0}, + {"USAGE_MEDIA", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(USAGE_MEDIA), 0}, {0} - #undef OFFSET -}}; +}; +#undef OFFSET -struct JNIAudioAttributesBuilder { +static struct JNIAudioAttributesBuilder { jclass clazz; jmethodID ctor; jmethodID setUsage; jmethodID setContentType; jmethodID build; - struct MPJniField mapping[]; -} AudioAttributesBuilder = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioAttributesBuilder, member) - {"android/media/AudioAttributes$Builder", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, - {"android/media/AudioAttributes$Builder", "<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 0}, - {"android/media/AudioAttributes$Builder", "setUsage", "(I)Landroid/media/AudioAttributes$Builder;", MP_JNI_METHOD, OFFSET(setUsage), 0}, - {"android/media/AudioAttributes$Builder", "setContentType", "(I)Landroid/media/AudioAttributes$Builder;", MP_JNI_METHOD, OFFSET(setContentType), 0}, - {"android/media/AudioAttributes$Builder", "build", "()Landroid/media/AudioAttributes;", MP_JNI_METHOD, OFFSET(build), 0}, +} AudioAttributesBuilder; +#define OFFSET(member) offsetof(struct JNIAudioAttributesBuilder, member) +static const struct MPJniField AudioAttributesBuilder_mapping[] = { + {"android/media/AudioAttributes$Builder", NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, + {"<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 0}, + {"setUsage", "(I)Landroid/media/AudioAttributes$Builder;", MP_JNI_METHOD, OFFSET(setUsage), 0}, + {"setContentType", "(I)Landroid/media/AudioAttributes$Builder;", MP_JNI_METHOD, OFFSET(setContentType), 0}, + {"build", "()Landroid/media/AudioAttributes;", MP_JNI_METHOD, OFFSET(build), 0}, {0} - #undef OFFSET -}}; +}; +#undef OFFSET 
-struct JNIAudioFormat { +static struct JNIAudioFormat { jclass clazz; jint ENCODING_PCM_8BIT; jint ENCODING_PCM_16BIT; @@ -194,77 +191,90 @@ struct JNIAudioFormat { jint CHANNEL_OUT_5POINT1; jint CHANNEL_OUT_BACK_CENTER; jint CHANNEL_OUT_7POINT1_SURROUND; - struct MPJniField mapping[]; -} AudioFormat = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioFormat, member) - {"android/media/AudioFormat", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, - {"android/media/AudioFormat", "ENCODING_PCM_8BIT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_8BIT), 1}, - {"android/media/AudioFormat", "ENCODING_PCM_16BIT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_16BIT), 1}, - {"android/media/AudioFormat", "ENCODING_PCM_FLOAT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_FLOAT), 1}, - {"android/media/AudioFormat", "ENCODING_IEC61937", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_IEC61937), 0}, - {"android/media/AudioFormat", "CHANNEL_OUT_MONO", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_MONO), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_STEREO", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_STEREO), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_FRONT_CENTER", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_FRONT_CENTER), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_QUAD", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_QUAD), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_5POINT1", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_5POINT1), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_BACK_CENTER", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_BACK_CENTER), 1}, - {"android/media/AudioFormat", "CHANNEL_OUT_7POINT1_SURROUND", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_7POINT1_SURROUND), 0}, +} AudioFormat; +#define OFFSET(member) offsetof(struct JNIAudioFormat, member) +static const struct MPJniField AudioFormat_mapping[] = { + {"android/media/AudioFormat", NULL, MP_JNI_CLASS, 
OFFSET(clazz), 1}, + {"ENCODING_PCM_8BIT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_8BIT), 1}, + {"ENCODING_PCM_16BIT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_16BIT), 1}, + {"ENCODING_PCM_FLOAT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_PCM_FLOAT), 1}, + {"ENCODING_IEC61937", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ENCODING_IEC61937), 0}, + {"CHANNEL_OUT_MONO", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_MONO), 1}, + {"CHANNEL_OUT_STEREO", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_STEREO), 1}, + {"CHANNEL_OUT_FRONT_CENTER", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_FRONT_CENTER), 1}, + {"CHANNEL_OUT_QUAD", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_QUAD), 1}, + {"CHANNEL_OUT_5POINT1", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_5POINT1), 1}, + {"CHANNEL_OUT_BACK_CENTER", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_BACK_CENTER), 1}, + {"CHANNEL_OUT_7POINT1_SURROUND", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(CHANNEL_OUT_7POINT1_SURROUND), 0}, {0} - #undef OFFSET -}}; +}; +#undef OFFSET -struct JNIAudioFormatBuilder { +static struct JNIAudioFormatBuilder { jclass clazz; jmethodID ctor; jmethodID setEncoding; jmethodID setSampleRate; jmethodID setChannelMask; jmethodID build; - struct MPJniField mapping[]; -} AudioFormatBuilder = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioFormatBuilder, member) - {"android/media/AudioFormat$Builder", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, - {"android/media/AudioFormat$Builder", "<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 0}, - {"android/media/AudioFormat$Builder", "setEncoding", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, OFFSET(setEncoding), 0}, - {"android/media/AudioFormat$Builder", "setSampleRate", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, OFFSET(setSampleRate), 0}, - {"android/media/AudioFormat$Builder", "setChannelMask", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, 
OFFSET(setChannelMask), 0}, - {"android/media/AudioFormat$Builder", "build", "()Landroid/media/AudioFormat;", MP_JNI_METHOD, OFFSET(build), 0}, +} AudioFormatBuilder; +#define OFFSET(member) offsetof(struct JNIAudioFormatBuilder, member) +static const struct MPJniField AudioFormatBuilder_mapping[] = { + {"android/media/AudioFormat$Builder", NULL, MP_JNI_CLASS, OFFSET(clazz), 0}, + {"<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 0}, + {"setEncoding", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, OFFSET(setEncoding), 0}, + {"setSampleRate", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, OFFSET(setSampleRate), 0}, + {"setChannelMask", "(I)Landroid/media/AudioFormat$Builder;", MP_JNI_METHOD, OFFSET(setChannelMask), 0}, + {"build", "()Landroid/media/AudioFormat;", MP_JNI_METHOD, OFFSET(build), 0}, {0} - #undef OFFSET -}}; - +}; +#undef OFFSET -struct JNIAudioManager { +static struct JNIAudioManager { jclass clazz; jint ERROR_DEAD_OBJECT; jint STREAM_MUSIC; - struct MPJniField mapping[]; -} AudioManager = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioManager, member) - {"android/media/AudioManager", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, - {"android/media/AudioManager", "STREAM_MUSIC", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(STREAM_MUSIC), 1}, - {"android/media/AudioManager", "ERROR_DEAD_OBJECT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_DEAD_OBJECT), 0}, +} AudioManager; +#define OFFSET(member) offsetof(struct JNIAudioManager, member) +static const struct MPJniField AudioManager_mapping[] = { + {"android/media/AudioManager", NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, + {"STREAM_MUSIC", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(STREAM_MUSIC), 1}, + {"ERROR_DEAD_OBJECT", "I", MP_JNI_STATIC_FIELD_AS_INT, OFFSET(ERROR_DEAD_OBJECT), 0}, {0} - #undef OFFSET -}}; +}; +#undef OFFSET -struct JNIAudioTimestamp { +static struct JNIAudioTimestamp { jclass clazz; jmethodID ctor; jfieldID framePosition; jfieldID nanoTime; - struct MPJniField 
mapping[]; -} AudioTimestamp = {.mapping = { - #define OFFSET(member) offsetof(struct JNIAudioTimestamp, member) - {"android/media/AudioTimestamp", NULL, NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, - {"android/media/AudioTimestamp", "<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 1}, - {"android/media/AudioTimestamp", "framePosition", "J", MP_JNI_FIELD, OFFSET(framePosition), 1}, - {"android/media/AudioTimestamp", "nanoTime", "J", MP_JNI_FIELD, OFFSET(nanoTime), 1}, +} AudioTimestamp; +#define OFFSET(member) offsetof(struct JNIAudioTimestamp, member) +static const struct MPJniField AudioTimestamp_mapping[] = { + {"android/media/AudioTimestamp", NULL, MP_JNI_CLASS, OFFSET(clazz), 1}, + {"<init>", "()V", MP_JNI_METHOD, OFFSET(ctor), 1}, + {"framePosition", "J", MP_JNI_FIELD, OFFSET(framePosition), 1}, + {"nanoTime", "J", MP_JNI_FIELD, OFFSET(nanoTime), 1}, {0} - #undef OFFSET -}}; - -#define MP_JNI_DELETELOCAL(o) (*env)->DeleteLocalRef(env, o) +}; +#undef OFFSET + +#define ENTRY(name) { &name, name ## _mapping } +static const struct { + void *fields; + const struct MPJniField *mapping; +} jclass_list[] = { + ENTRY(ByteBuffer), + ENTRY(AudioTrack), + ENTRY(AudioAttributes), + ENTRY(AudioAttributesBuilder), + ENTRY(AudioFormat), + ENTRY(AudioFormatBuilder), + ENTRY(AudioManager), + ENTRY(AudioTimestamp), +}; +#undef ENTRY static int AudioTrack_New(struct ao *ao) { @@ -279,24 +289,24 @@ static int AudioTrack_New(struct ao *ao) jobject format_builder = MP_JNI_NEW(AudioFormatBuilder.clazz, AudioFormatBuilder.ctor); MP_JNI_EXCEPTION_LOG(ao); tmp = MP_JNI_CALL_OBJECT(format_builder, AudioFormatBuilder.setEncoding, p->format); - MP_JNI_DELETELOCAL(tmp); + MP_JNI_LOCAL_FREEP(&tmp); tmp = MP_JNI_CALL_OBJECT(format_builder, AudioFormatBuilder.setSampleRate, p->samplerate); - MP_JNI_DELETELOCAL(tmp); + MP_JNI_LOCAL_FREEP(&tmp); tmp = MP_JNI_CALL_OBJECT(format_builder, AudioFormatBuilder.setChannelMask, p->channel_config); - MP_JNI_DELETELOCAL(tmp); + MP_JNI_LOCAL_FREEP(&tmp); jobject 
format = MP_JNI_CALL_OBJECT(format_builder, AudioFormatBuilder.build); - MP_JNI_DELETELOCAL(format_builder); + MP_JNI_LOCAL_FREEP(&format_builder); jobject attr_builder = MP_JNI_NEW(AudioAttributesBuilder.clazz, AudioAttributesBuilder.ctor); MP_JNI_EXCEPTION_LOG(ao); tmp = MP_JNI_CALL_OBJECT(attr_builder, AudioAttributesBuilder.setUsage, AudioAttributes.USAGE_MEDIA); - MP_JNI_DELETELOCAL(tmp); + MP_JNI_LOCAL_FREEP(&tmp); jint content_type = (ao->init_flags & AO_INIT_MEDIA_ROLE_MUSIC) ? AudioAttributes.CONTENT_TYPE_MUSIC : AudioAttributes.CONTENT_TYPE_MOVIE; tmp = MP_JNI_CALL_OBJECT(attr_builder, AudioAttributesBuilder.setContentType, content_type); - MP_JNI_DELETELOCAL(tmp); + MP_JNI_LOCAL_FREEP(&tmp); jobject attr = MP_JNI_CALL_OBJECT(attr_builder, AudioAttributesBuilder.build); - MP_JNI_DELETELOCAL(attr_builder); + MP_JNI_LOCAL_FREEP(&attr_builder); audiotrack = MP_JNI_NEW( AudioTrack.clazz, @@ -308,8 +318,8 @@ static int AudioTrack_New(struct ao *ao) p->cfg_session_id ); - MP_JNI_DELETELOCAL(format); - MP_JNI_DELETELOCAL(attr); + MP_JNI_LOCAL_FREEP(&format); + MP_JNI_LOCAL_FREEP(&attr); } else { MP_VERBOSE(ao, "Using legacy initializer\n"); audiotrack = MP_JNI_NEW( @@ -332,7 +342,7 @@ static int AudioTrack_New(struct ao *ao) if (MP_JNI_CALL_INT(audiotrack, AudioTrack.getState) != AudioTrack.STATE_INITIALIZED) { MP_JNI_CALL_VOID(audiotrack, AudioTrack.release); MP_JNI_EXCEPTION_LOG(ao); - (*env)->DeleteLocalRef(env, audiotrack); + MP_JNI_LOCAL_FREEP(&audiotrack); MP_ERR(ao, "AudioTrack.getState failed\n"); return -1; } @@ -346,7 +356,7 @@ static int AudioTrack_New(struct ao *ao) } p->audiotrack = (*env)->NewGlobalRef(env, audiotrack); - (*env)->DeleteLocalRef(env, audiotrack); + MP_JNI_LOCAL_FREEP(&audiotrack); if (!p->audiotrack) return -1; @@ -360,8 +370,7 @@ static int AudioTrack_Recreate(struct ao *ao) MP_JNI_CALL_VOID(p->audiotrack, AudioTrack.release); MP_JNI_EXCEPTION_LOG(ao); - (*env)->DeleteGlobalRef(env, p->audiotrack); - p->audiotrack = NULL; + 
MP_JNI_GLOBAL_FREEP(&p->audiotrack); return AudioTrack_New(ao); } @@ -407,11 +416,6 @@ static uint32_t AudioTrack_getPlaybackHeadPosition(struct ao *ao) int64_t time = MP_JNI_GET_LONG(p->timestamp, AudioTimestamp.nanoTime); if (time == 0) fpos = pos = 0; - if (p->needs_timestamp_offset) { - if (time != 0 && !p->timestamp_offset) - p->timestamp_offset = now - time; - time += p->timestamp_offset; - } if (fpos != 0 && time != 0 && state == AudioTrack.PLAYSTATE_PLAYING) { double diff = (double)(now - time) / 1e9; pos += diff * ao->samplerate; @@ -497,7 +501,7 @@ static int AudioTrack_write(struct ao *ao, int len) // reset positions for reading jobject bbuf = MP_JNI_CALL_OBJECT(p->bbuf, ByteBuffer.clear); if (MP_JNI_EXCEPTION_LOG(ao) < 0) return -1; - (*env)->DeleteLocalRef(env, bbuf); + MP_JNI_LOCAL_FREEP(&bbuf); ret = MP_JNI_CALL_INT(p->audiotrack, AudioTrack.writeBufferV21, p->bbuf, len, AudioTrack.WRITE_BLOCKING); if (MP_JNI_EXCEPTION_LOG(ao) < 0) return -1; @@ -521,35 +525,29 @@ static int AudioTrack_write(struct ao *ao, int len) static void uninit_jni(struct ao *ao) { JNIEnv *env = MP_JNI_GET_ENV(ao); - mp_jni_reset_jfields(env, &AudioTrack, AudioTrack.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioTimestamp, AudioTimestamp.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioManager, AudioManager.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioFormat, AudioFormat.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioFormatBuilder, AudioFormatBuilder.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioAttributes, AudioAttributes.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &AudioAttributesBuilder, AudioAttributesBuilder.mapping, 1, ao->log); - mp_jni_reset_jfields(env, &ByteBuffer, ByteBuffer.mapping, 1, ao->log); + for (int i = 0; i < MP_ARRAY_SIZE(jclass_list); i++) { + mp_jni_reset_jfields(env, jclass_list[i].fields, + jclass_list[i].mapping, 1, ao->log); + } } static int init_jni(struct ao *ao) { JNIEnv *env = MP_JNI_GET_ENV(ao); 
- if (mp_jni_init_jfields(env, &AudioTrack, AudioTrack.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &ByteBuffer, ByteBuffer.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioTimestamp, AudioTimestamp.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioManager, AudioManager.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioAttributes, AudioAttributes.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioAttributesBuilder, AudioAttributesBuilder.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioFormatBuilder, AudioFormatBuilder.mapping, 1, ao->log) < 0 || - mp_jni_init_jfields(env, &AudioFormat, AudioFormat.mapping, 1, ao->log) < 0) { - uninit_jni(ao); - return -1; + for (int i = 0; i < MP_ARRAY_SIZE(jclass_list); i++) { + if (mp_jni_init_jfields(env, jclass_list[i].fields, + jclass_list[i].mapping, 1, ao->log) < 0) { + goto error; + } } - return 0; + + return 0; + +error: + uninit_jni(ao); + return -1; } -static MP_THREAD_VOID playthread(void *arg) +static MP_THREAD_VOID ao_thread(void *arg) { struct ao *ao = arg; struct priv *p = ao->priv; @@ -608,34 +606,18 @@ static void uninit(struct ao *ao) if (p->audiotrack) { MP_JNI_CALL_VOID(p->audiotrack, AudioTrack.release); MP_JNI_EXCEPTION_LOG(ao); - (*env)->DeleteGlobalRef(env, p->audiotrack); - p->audiotrack = NULL; + MP_JNI_GLOBAL_FREEP(&p->audiotrack); } - if (p->bytearray) { - (*env)->DeleteGlobalRef(env, p->bytearray); - p->bytearray = NULL; - } + MP_JNI_GLOBAL_FREEP(&p->bytearray); - if (p->shortarray) { - (*env)->DeleteGlobalRef(env, p->shortarray); - p->shortarray = NULL; - } + MP_JNI_GLOBAL_FREEP(&p->shortarray); - if (p->floatarray) { - (*env)->DeleteGlobalRef(env, p->floatarray); - p->floatarray = NULL; - } + MP_JNI_GLOBAL_FREEP(&p->floatarray); - if (p->bbuf) { - (*env)->DeleteGlobalRef(env, p->bbuf); - p->bbuf = NULL; - } + MP_JNI_GLOBAL_FREEP(&p->bbuf); - if (p->timestamp) { - (*env)->DeleteGlobalRef(env, p->timestamp); - p->timestamp = NULL; - } + 
MP_JNI_GLOBAL_FREEP(&p->timestamp); mp_cond_destroy(&p->wakeup); mp_mutex_destroy(&p->lock); @@ -658,6 +640,10 @@ static int init(struct ao *ao) if (af_fmt_is_spdif(ao->format)) { p->format = AudioFormat.ENCODING_IEC61937; + if (!p->format || !AudioTrack.writeShortV23) { + MP_ERR(ao, "spdif passthrough not supported by API\n"); + return -1; + } } else if (ao->format == AF_FORMAT_U8) { p->format = AudioFormat.ENCODING_PCM_8BIT; } else if (p->cfg_pcm_float && af_fmt_is_float(ao->format)) { @@ -752,26 +738,26 @@ static int init(struct ao *ao) return -1; } p->timestamp = (*env)->NewGlobalRef(env, timestamp); - (*env)->DeleteLocalRef(env, timestamp); + MP_JNI_LOCAL_FREEP(×tamp); // decide and create buffer of right type if (p->format == AudioFormat.ENCODING_IEC61937) { jshortArray shortarray = (*env)->NewShortArray(env, p->chunksize / 2); p->shortarray = (*env)->NewGlobalRef(env, shortarray); - (*env)->DeleteLocalRef(env, shortarray); + MP_JNI_LOCAL_FREEP(&shortarray); } else if (AudioTrack.writeBufferV21) { MP_VERBOSE(ao, "Using NIO ByteBuffer\n"); jobject bbuf = (*env)->NewDirectByteBuffer(env, p->chunk, p->chunksize); p->bbuf = (*env)->NewGlobalRef(env, bbuf); - (*env)->DeleteLocalRef(env, bbuf); + MP_JNI_LOCAL_FREEP(&bbuf); } else if (p->format == AudioFormat.ENCODING_PCM_FLOAT) { jfloatArray floatarray = (*env)->NewFloatArray(env, p->chunksize / sizeof(float)); p->floatarray = (*env)->NewGlobalRef(env, floatarray); - (*env)->DeleteLocalRef(env, floatarray); + MP_JNI_LOCAL_FREEP(&floatarray); } else { jbyteArray bytearray = (*env)->NewByteArray(env, p->chunksize); p->bytearray = (*env)->NewGlobalRef(env, bytearray); - (*env)->DeleteLocalRef(env, bytearray); + MP_JNI_LOCAL_FREEP(&bytearray); } /* create AudioTrack object */ @@ -780,7 +766,7 @@ static int init(struct ao *ao) goto error; } - if (mp_thread_create(&p->thread, playthread, ao)) { + if (mp_thread_create(&p->thread, ao_thread, ao)) { MP_ERR(ao, "pthread creation failed\n"); goto error; } @@ -812,7 +798,6 @@ 
static void stop(struct ao *ao) p->written_frames = 0; p->timestamp_fetched = 0; p->timestamp_set = false; - p->timestamp_offset = 0; } static void start(struct ao *ao) diff --git a/audio/out/ao_avfoundation.m b/audio/out/ao_avfoundation.m new file mode 100644 index 0000000..7654916 --- /dev/null +++ b/audio/out/ao_avfoundation.m @@ -0,0 +1,372 @@ +/* + * This file is part of mpv. + * + * mpv is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * mpv is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with mpv. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "ao.h" +#include "audio/format.h" +#include "audio/out/ao_coreaudio_chmap.h" +#include "audio/out/ao_coreaudio_utils.h" +#include "common/common.h" +#include "common/msg.h" +#include "internal.h" +#include "osdep/timer.h" +#include "ta/ta_talloc.h" + +#import <AVFoundation/AVFoundation.h> +#import <Foundation/Foundation.h> +#import <CoreAudioTypes/CoreAudioTypes.h> +#import <CoreFoundation/CoreFoundation.h> +#import <CoreMedia/CoreMedia.h> + + +@interface AVObserver : NSObject { + struct ao *ao; +} +- (void)handleRestartNotification:(NSNotification*)notification; +@end + +struct priv { + AVSampleBufferAudioRenderer *renderer; + AVSampleBufferRenderSynchronizer *synchronizer; + dispatch_queue_t queue; + CMAudioFormatDescriptionRef format_description; + AVObserver *observer; + int64_t end_time_av; +}; + +static int64_t CMTimeGetNanoseconds(CMTime time) +{ + time = CMTimeConvertScale(time, 1000000000, kCMTimeRoundingMethod_Default); + return time.value; +} + +static CMTime CMTimeFromNanoseconds(int64_t time) +{ + return CMTimeMake(time, 1000000000); +} + +static void feed(struct ao *ao) +{ + struct priv *p = ao->priv; + int samplerate = ao->samplerate; + int sstride = ao->sstride; + + CMBlockBufferRef block_buffer = NULL; + CMSampleBufferRef sample_buffer = NULL; + OSStatus err; + + int request_sample_count = samplerate / 10; + int buffer_size = request_sample_count * sstride; + void *data[] = {CFAllocatorAllocate(NULL, buffer_size, 0)}; + + int64_t cur_time_av = CMTimeGetNanoseconds([p->synchronizer currentTime]); + int64_t cur_time_mp = mp_time_ns(); + int64_t end_time_av = MPMAX(p->end_time_av, cur_time_av); + int64_t time_delta = CMTimeGetNanoseconds(CMTimeMake(request_sample_count, samplerate)); + int real_sample_count = ao_read_data_nonblocking(ao, data, request_sample_count, end_time_av - cur_time_av + cur_time_mp + time_delta); + if (real_sample_count == 0) { + // avoid spinning by blocking the thread + mp_sleep_ns(10000000); + goto finish; + 
} + + if ((err = CMBlockBufferCreateWithMemoryBlock( + NULL, + data[0], + buffer_size, + NULL, + NULL, + 0, + real_sample_count * sstride, + 0, + &block_buffer + )) != noErr) { + MP_FATAL(ao, "failed to create block buffer\n"); + MP_VERBOSE(ao, "CMBlockBufferCreateWithMemoryBlock returned %d\n", err); + goto error; + } + data[0] = NULL; + + CMSampleTimingInfo sample_timing_into[] = {(CMSampleTimingInfo) { + .duration = CMTimeMake(1, samplerate), + .presentationTimeStamp = CMTimeFromNanoseconds(end_time_av), + .decodeTimeStamp = kCMTimeInvalid + }}; + size_t sample_size_array[] = {sstride}; + if ((err = CMSampleBufferCreateReady( + NULL, + block_buffer, + p->format_description, + real_sample_count, + 1, + sample_timing_into, + 1, + sample_size_array, + &sample_buffer + )) != noErr) { + MP_FATAL(ao, "failed to create sample buffer\n"); + MP_VERBOSE(ao, "CMSampleBufferCreateReady returned %d\n", err); + goto error; + } + + [p->renderer enqueueSampleBuffer:sample_buffer]; + + time_delta = CMTimeGetNanoseconds(CMTimeMake(real_sample_count, samplerate)); + p->end_time_av = end_time_av + time_delta; + + goto finish; + +error: + ao_request_reload(ao); +finish: + if (data[0]) CFAllocatorDeallocate(NULL, data[0]); + if (block_buffer) CFRelease(block_buffer); + if (sample_buffer) CFRelease(sample_buffer); +} + +static void start(struct ao *ao) +{ + struct priv *p = ao->priv; + + p->end_time_av = -1; + [p->synchronizer setRate:1]; + [p->renderer requestMediaDataWhenReadyOnQueue:p->queue usingBlock:^{ + feed(ao); + }]; +} + +static void stop(struct ao *ao) +{ + struct priv *p = ao->priv; + + dispatch_sync(p->queue, ^{ + [p->renderer stopRequestingMediaData]; + [p->renderer flush]; + [p->synchronizer setRate:0]; + }); +} + +static bool set_pause(struct ao *ao, bool paused) +{ + struct priv *p = ao->priv; + + if (paused) { + [p->synchronizer setRate:0]; + } else { + [p->synchronizer setRate:1]; + } + + return true; +} + +static int control(struct ao *ao, enum aocontrol cmd, void 
*arg) +{ + struct priv *p = ao->priv; + + switch (cmd) { + case AOCONTROL_GET_MUTE: + *(bool*)arg = [p->renderer isMuted]; + return CONTROL_OK; + case AOCONTROL_GET_VOLUME: + *(float*)arg = [p->renderer volume] * 100; + return CONTROL_OK; + case AOCONTROL_SET_MUTE: + [p->renderer setMuted:*(bool*)arg]; + return CONTROL_OK; + case AOCONTROL_SET_VOLUME: + [p->renderer setVolume:*(float*)arg / 100]; + return CONTROL_OK; + default: + return CONTROL_UNKNOWN; + } +} + +@implementation AVObserver +- (instancetype)initWithAO:(struct ao*)_ao { + self = [super init]; + if (self) { + ao = _ao; + } + return self; +} +- (void)handleRestartNotification:(NSNotification*)notification { + char *name = cfstr_get_cstr((CFStringRef)notification.name); + MP_WARN(ao, "restarting due to system notification; this will cause desync\n"); + MP_VERBOSE(ao, "notification name: %s\n", name); + talloc_free(name); + stop(ao); + start(ao); +} +@end + +static int init(struct ao *ao) +{ + struct priv *p = ao->priv; + AudioChannelLayout *layout = NULL; + +#if TARGET_OS_IPHONE + AVAudioSession *instance = AVAudioSession.sharedInstance; + NSInteger maxChannels = instance.maximumOutputNumberOfChannels; + NSInteger prefChannels = MIN(maxChannels, ao->channels.num); + [instance setCategory:AVAudioSessionCategoryPlayback error:nil]; + [instance setMode:AVAudioSessionModeMoviePlayback error:nil]; + [instance setActive:YES error:nil]; + [instance setPreferredOutputNumberOfChannels:prefChannels error:nil]; +#endif + + if ((p->renderer = [[AVSampleBufferAudioRenderer alloc] init]) == nil) { + MP_FATAL(ao, "failed to create audio renderer\n"); + MP_VERBOSE(ao, "AVSampleBufferAudioRenderer failed to initialize\n"); + goto error; + } + if ((p->synchronizer = [[AVSampleBufferRenderSynchronizer alloc] init]) == nil) { + MP_FATAL(ao, "failed to create rendering synchronizer\n"); + MP_VERBOSE(ao, "AVSampleBufferRenderSynchronizer failed to initialize\n"); + goto error; + } + if ((p->queue = dispatch_queue_create( + 
"avfoundation event", + dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, 0) + )) == NULL) { + MP_FATAL(ao, "failed to create dispatch queue\n"); + MP_VERBOSE(ao, "dispatch_queue_create failed\n"); + goto error; + } + + if (ao->device && ao->device[0]) { + [p->renderer setAudioOutputDeviceUniqueID:(NSString*)cfstr_from_cstr(ao->device)]; + } + + [p->synchronizer addRenderer:p->renderer]; + if (@available(tvOS 14.5, iOS 14.5, macOS 11.3, *)) { + [p->synchronizer setDelaysRateChangeUntilHasSufficientMediaData:NO]; + } + + if (af_fmt_is_spdif(ao->format)) { + MP_FATAL(ao, "avfoundation does not support SPDIF\n"); +#if HAVE_COREAUDIO + MP_FATAL(ao, "please use coreaudio_exclusive instead\n"); +#endif + goto error; + } + + // AVSampleBufferAudioRenderer only supports interleaved formats + ao->format = af_fmt_from_planar(ao->format); + if (af_fmt_is_planar(ao->format)) { + MP_FATAL(ao, "planar audio formats are unsupported\n"); + goto error; + } + + AudioStreamBasicDescription asbd; + ca_fill_asbd(ao, &asbd); + size_t layout_size = sizeof(AudioChannelLayout) + + (ao->channels.num - 1) * sizeof(AudioChannelDescription); + layout = talloc_size(ao, layout_size); + layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions; + layout->mNumberChannelDescriptions = ao->channels.num; + for (int i = 0; i < ao->channels.num; ++i) { + AudioChannelDescription *desc = layout->mChannelDescriptions + i; + desc->mChannelFlags = kAudioChannelFlags_AllOff; + desc->mChannelLabel = mp_speaker_id_to_ca_label(ao->channels.speaker[i]); + } + + void *talloc_ctx = talloc_new(NULL); + AudioChannelLayout *std_layout = ca_find_standard_layout(talloc_ctx, layout); + memmove(layout, std_layout, sizeof(AudioChannelLayout)); + talloc_free(talloc_ctx); + ca_log_layout(ao, MSGL_V, layout); + + OSStatus err; + if ((err = CMAudioFormatDescriptionCreate( + NULL, + &asbd, + layout_size, + layout, + 0, + NULL, + NULL, + &p->format_description + )) != 
noErr) { + MP_FATAL(ao, "failed to create audio format description\n"); + MP_VERBOSE(ao, "CMAudioFormatDescriptionCreate returned %d\n", err); + goto error; + } + talloc_free(layout); + layout = NULL; + + // AVSampleBufferAudioRenderer read ahead aggressively + ao->device_buffer = ao->samplerate * 2; + + p->observer = [[AVObserver alloc] initWithAO:ao]; + NSNotificationCenter *center = [NSNotificationCenter defaultCenter]; + [center addObserver:p->observer selector:@selector(handleRestartNotification:) name:AVSampleBufferAudioRendererOutputConfigurationDidChangeNotification object:p->renderer]; + [center addObserver:p->observer selector:@selector(handleRestartNotification:) name:AVSampleBufferAudioRendererWasFlushedAutomaticallyNotification object:p->renderer]; + + return CONTROL_OK; + +error: + talloc_free(layout); + if (p->renderer) [p->renderer release]; + if (p->synchronizer) [p->synchronizer release]; + if (p->queue) dispatch_release(p->queue); + if (p->format_description) CFRelease(p->format_description); + +#if TARGET_OS_IPHONE + [AVAudioSession.sharedInstance setActive:NO + withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation + error:nil + ]; +#endif + + return CONTROL_ERROR; +} + +static void uninit(struct ao *ao) +{ + struct priv *p = ao->priv; + + stop(ao); + + [p->renderer release]; + [p->synchronizer release]; + dispatch_release(p->queue); + CFRelease(p->format_description); + + [[NSNotificationCenter defaultCenter] removeObserver:p->observer]; + [p->observer release]; + +#if TARGET_OS_IPHONE + [AVAudioSession.sharedInstance setActive:NO + withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation + error:nil + ]; +#endif +} + +#define OPT_BASE_STRUCT struct priv + +const struct ao_driver audio_out_avfoundation = { + .description = "AVFoundation AVSampleBufferAudioRenderer", + .name = "avfoundation", + .uninit = uninit, + .init = init, + .control = control, + .reset = stop, + .start = start, + .set_pause = set_pause, + 
.list_devs = ca_get_device_list, + .priv_size = sizeof(struct priv), +}; diff --git a/audio/out/ao_coreaudio.c b/audio/out/ao_coreaudio.c index 37f1313..ae743c9 100644 --- a/audio/out/ao_coreaudio.c +++ b/audio/out/ao_coreaudio.c @@ -27,6 +27,11 @@ #include "ao_coreaudio_properties.h" #include "ao_coreaudio_utils.h" +// The timeout for stopping the audio unit after being reset. This allows the +// device to sleep after playback paused. The duration is chosen to match the +// behavior of AVFoundation. +#define IDLE_TIME 7 * NSEC_PER_SEC + struct priv { AudioDeviceID device; AudioUnit audio_unit; @@ -37,6 +42,12 @@ struct priv { AudioStreamID original_asbd_stream; bool change_physical_format; + + // Block that is executed after `IDLE_TIME` to stop audio output unit. + dispatch_block_t idle_work; + dispatch_queue_t queue; + + int hotplug_cb_registration_times; }; static int64_t ca_get_hardware_latency(struct ao *ao) { @@ -78,7 +89,7 @@ static OSStatus render_cb_lpcm(void *ctx, AudioUnitRenderActionFlags *aflags, int64_t end = mp_time_ns(); end += p->hw_latency_ns + ca_get_latency(ts) + ca_frames_to_ns(ao, frames); - int samples = ao_read_data_nonblocking(ao, planes, frames, end); + int samples = ao_read_data(ao, planes, frames, end); if (samples == 0) *aflags |= kAudioUnitRenderAction_OutputIsSilence; @@ -128,6 +139,9 @@ static int control(struct ao *ao, enum aocontrol cmd, void *arg) static bool init_audiounit(struct ao *ao, AudioStreamBasicDescription asbd); static void init_physical_format(struct ao *ao); +static void reinit_latency(struct ao *ao); +static bool register_hotplug_cb(struct ao *ao); +static void unregister_hotplug_cb(struct ao *ao); static bool reinit_device(struct ao *ao) { struct priv *p = ao->priv; @@ -154,6 +168,9 @@ static int init(struct ao *ao) if (!reinit_device(ao)) goto coreaudio_error; + if (!register_hotplug_cb(ao)) + goto coreaudio_error; + if (p->change_physical_format) init_physical_format(ao); @@ -166,6 +183,11 @@ static int 
init(struct ao *ao) if (!init_audiounit(ao, asbd)) goto coreaudio_error; + reinit_latency(ao); + + p->queue = dispatch_queue_create("io.mpv.coreaudio_stop_during_idle", + DISPATCH_QUEUE_SERIAL); + return CONTROL_OK; coreaudio_error: @@ -295,8 +317,6 @@ static bool init_audiounit(struct ao *ao, AudioStreamBasicDescription asbd) CHECK_CA_ERROR_L(coreaudio_error_audiounit, "can't link audio unit to selected device"); - p->hw_latency_ns = ca_get_hardware_latency(ao); - AURenderCallbackStruct render_cb = (AURenderCallbackStruct) { .inputProc = render_cb_lpcm, .inputProcRefCon = ao, @@ -320,24 +340,96 @@ coreaudio_error: return false; } -static void reset(struct ao *ao) +static void reinit_latency(struct ao *ao) +{ + struct priv *p = ao->priv; + + p->hw_latency_ns = ca_get_hardware_latency(ao); +} + +static void stop(struct ao *ao) +{ + struct priv *p = ao->priv; + OSStatus err = AudioOutputUnitStop(p->audio_unit); + CHECK_CA_WARN("can't stop audio unit"); +} + +static void cancel_and_release_idle_work(struct priv *p) +{ + if (!p->idle_work) + return; + + dispatch_block_cancel(p->idle_work); + Block_release(p->idle_work); + p->idle_work = NULL; +} + +static void stop_after_idle_time(struct ao *ao) { struct priv *p = ao->priv; + + cancel_and_release_idle_work(p); + + p->idle_work = dispatch_block_create(0, ^{ + MP_VERBOSE(ao, "Stopping audio unit due to idle timeout\n"); + stop(ao); + }); + + dispatch_after(dispatch_time(DISPATCH_TIME_NOW, IDLE_TIME), + p->queue, p->idle_work); +} + +static void _reset(void *_ao) +{ + struct ao *ao = (struct ao *)_ao; + struct priv *p = ao->priv; OSStatus err = AudioUnitReset(p->audio_unit, kAudioUnitScope_Global, 0); CHECK_CA_WARN("can't reset audio unit"); + + // Until the audio unit is stopped the macOS daemon coreaudiod continues to + // consume CPU and prevent macOS from sleeping. 
Immediately stopping the + // audio unit would be disruptive for short pause/resume cycles as + // restarting the audio unit takes a noticeable amount of time when a + // wireless audio device is being used. Instead the audio unit is stopped + // after a delay if it remains idle. + stop_after_idle_time(ao); } -static void start(struct ao *ao) +static void reset(struct ao *ao) { struct priv *p = ao->priv; + // Must dispatch to serialize reset, start and stop operations. + dispatch_sync_f(p->queue, ao, &_reset); +} + +static void _start(void *_ao) +{ + struct ao *ao = (struct ao *)_ao; + struct priv *p = ao->priv; + + if (p->idle_work) + dispatch_block_cancel(p->idle_work); + OSStatus err = AudioOutputUnitStart(p->audio_unit); CHECK_CA_WARN("can't start audio unit"); } +static void start(struct ao *ao) +{ + struct priv *p = ao->priv; + // Must dispatch to serialize reset, start and stop operations. + dispatch_sync_f(p->queue, ao, &_start); +} static void uninit(struct ao *ao) { struct priv *p = ao->priv; + + dispatch_sync(p->queue, ^{ + cancel_and_release_idle_work(p); + }); + dispatch_release(p->queue); + AudioOutputUnitStop(p->audio_unit); AudioUnitUninitialize(p->audio_unit); AudioComponentInstanceDispose(p->audio_unit); @@ -348,6 +440,8 @@ static void uninit(struct ao *ao) &p->original_asbd); CHECK_CA_WARN("could not restore physical stream format"); } + + unregister_hotplug_cb(ao); } static OSStatus hotplug_cb(AudioObjectID id, UInt32 naddr, @@ -355,8 +449,11 @@ static OSStatus hotplug_cb(AudioObjectID id, UInt32 naddr, void *ctx) { struct ao *ao = ctx; + struct priv *p = ao->priv; MP_VERBOSE(ao, "Handling potential hotplug event...\n"); reinit_device(ao); + if (p->audio_unit) + reinit_latency(ao); ao_hotplug_event(ao); return noErr; } @@ -369,7 +466,25 @@ static uint32_t hotplug_properties[] = { static int hotplug_init(struct ao *ao) { if (!reinit_device(ao)) - goto coreaudio_error; + return -1; + + if (!register_hotplug_cb(ao)) + return -1; + + return 0; +} + 
+static void hotplug_uninit(struct ao *ao) +{ + unregister_hotplug_cb(ao); +} + +static bool register_hotplug_cb(struct ao *ao) +{ + struct priv *p = ao->priv; + + if (p->hotplug_cb_registration_times++) + return true; OSStatus err = noErr; for (int i = 0; i < MP_ARRAY_SIZE(hotplug_properties); i++) { @@ -388,14 +503,19 @@ static int hotplug_init(struct ao *ao) } } - return 0; + return true; coreaudio_error: - return -1; + return false; } -static void hotplug_uninit(struct ao *ao) +static void unregister_hotplug_cb(struct ao *ao) { + struct priv *p = ao->priv; + + if (--p->hotplug_cb_registration_times) + return; + OSStatus err = noErr; for (int i = 0; i < MP_ARRAY_SIZE(hotplug_properties); i++) { AudioObjectPropertyAddress addr = { diff --git a/audio/out/ao_coreaudio_chmap.c b/audio/out/ao_coreaudio_chmap.c index 3fd9550..5a129c4 100644 --- a/audio/out/ao_coreaudio_chmap.c +++ b/audio/out/ao_coreaudio_chmap.c @@ -22,6 +22,7 @@ #include "ao_coreaudio_utils.h" #include "ao_coreaudio_chmap.h" +#include <CoreAudioTypes/CoreAudioTypes.h> static const int speaker_map[][2] = { { kAudioChannelLabel_Left, MP_SPEAKER_ID_FL }, @@ -65,6 +66,119 @@ static const int speaker_map[][2] = { { 0, -1 }, }; +static const AudioChannelLayoutTag std_layouts[] = { + (100U<<16) | 1, // kAudioChannelLayoutTag_Mono + (101U<<16) | 2, // kAudioChannelLayoutTag_Stereo + (102U<<16) | 2, // kAudioChannelLayoutTag_StereoHeadphones + (103U<<16) | 2, // kAudioChannelLayoutTag_MatrixStereo + (104U<<16) | 2, // kAudioChannelLayoutTag_MidSide + (105U<<16) | 2, // kAudioChannelLayoutTag_XY + (106U<<16) | 2, // kAudioChannelLayoutTag_Binaural + (107U<<16) | 4, // kAudioChannelLayoutTag_Ambisonic_B_Format + (108U<<16) | 4, // kAudioChannelLayoutTag_Quadraphonic + (109U<<16) | 5, // kAudioChannelLayoutTag_Pentagonal + (110U<<16) | 6, // kAudioChannelLayoutTag_Hexagonal + (111U<<16) | 8, // kAudioChannelLayoutTag_Octagonal + (112U<<16) | 8, // kAudioChannelLayoutTag_Cube + (113U<<16) | 3, // 
kAudioChannelLayoutTag_MPEG_3_0_A + (114U<<16) | 3, // kAudioChannelLayoutTag_MPEG_3_0_B + (115U<<16) | 4, // kAudioChannelLayoutTag_MPEG_4_0_A + (116U<<16) | 4, // kAudioChannelLayoutTag_MPEG_4_0_B + (117U<<16) | 5, // kAudioChannelLayoutTag_MPEG_5_0_A + (118U<<16) | 5, // kAudioChannelLayoutTag_MPEG_5_0_B + (119U<<16) | 5, // kAudioChannelLayoutTag_MPEG_5_0_C + (120U<<16) | 5, // kAudioChannelLayoutTag_MPEG_5_0_D + (121U<<16) | 6, // kAudioChannelLayoutTag_MPEG_5_1_A + (122U<<16) | 6, // kAudioChannelLayoutTag_MPEG_5_1_B + (123U<<16) | 6, // kAudioChannelLayoutTag_MPEG_5_1_C + (124U<<16) | 6, // kAudioChannelLayoutTag_MPEG_5_1_D + (125U<<16) | 7, // kAudioChannelLayoutTag_MPEG_6_1_A + (126U<<16) | 8, // kAudioChannelLayoutTag_MPEG_7_1_A + (127U<<16) | 8, // kAudioChannelLayoutTag_MPEG_7_1_B + (128U<<16) | 8, // kAudioChannelLayoutTag_MPEG_7_1_C + (129U<<16) | 8, // kAudioChannelLayoutTag_Emagic_Default_7_1 + (130U<<16) | 8, // kAudioChannelLayoutTag_SMPTE_DTV + (131U<<16) | 3, // kAudioChannelLayoutTag_ITU_2_1 + (132U<<16) | 4, // kAudioChannelLayoutTag_ITU_2_2 + (133U<<16) | 3, // kAudioChannelLayoutTag_DVD_4 + (134U<<16) | 4, // kAudioChannelLayoutTag_DVD_5 + (135U<<16) | 5, // kAudioChannelLayoutTag_DVD_6 + (136U<<16) | 4, // kAudioChannelLayoutTag_DVD_10 + (137U<<16) | 5, // kAudioChannelLayoutTag_DVD_11 + (138U<<16) | 5, // kAudioChannelLayoutTag_DVD_18 + (139U<<16) | 6, // kAudioChannelLayoutTag_AudioUnit_6_0 + (140U<<16) | 7, // kAudioChannelLayoutTag_AudioUnit_7_0 + (148U<<16) | 7, // kAudioChannelLayoutTag_AudioUnit_7_0_Front + (141U<<16) | 6, // kAudioChannelLayoutTag_AAC_6_0 + (142U<<16) | 7, // kAudioChannelLayoutTag_AAC_6_1 + (143U<<16) | 7, // kAudioChannelLayoutTag_AAC_7_0 + (183U<<16) | 8, // kAudioChannelLayoutTag_AAC_7_1_B + (184U<<16) | 8, // kAudioChannelLayoutTag_AAC_7_1_C + (144U<<16) | 8, // kAudioChannelLayoutTag_AAC_Octagonal + (145U<<16) | 16, // kAudioChannelLayoutTag_TMH_10_2_std + (146U<<16) | 21, // 
kAudioChannelLayoutTag_TMH_10_2_full + (149U<<16) | 2, // kAudioChannelLayoutTag_AC3_1_0_1 + (150U<<16) | 3, // kAudioChannelLayoutTag_AC3_3_0 + (151U<<16) | 4, // kAudioChannelLayoutTag_AC3_3_1 + (152U<<16) | 4, // kAudioChannelLayoutTag_AC3_3_0_1 + (153U<<16) | 4, // kAudioChannelLayoutTag_AC3_2_1_1 + (154U<<16) | 5, // kAudioChannelLayoutTag_AC3_3_1_1 + (155U<<16) | 6, // kAudioChannelLayoutTag_EAC_6_0_A + (156U<<16) | 7, // kAudioChannelLayoutTag_EAC_7_0_A + (157U<<16) | 7, // kAudioChannelLayoutTag_EAC3_6_1_A + (158U<<16) | 7, // kAudioChannelLayoutTag_EAC3_6_1_B + (159U<<16) | 7, // kAudioChannelLayoutTag_EAC3_6_1_C + (160U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_A + (161U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_B + (162U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_C + (163U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_D + (164U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_E + (165U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_F + (166U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_G + (167U<<16) | 8, // kAudioChannelLayoutTag_EAC3_7_1_H + (168U<<16) | 4, // kAudioChannelLayoutTag_DTS_3_1 + (169U<<16) | 5, // kAudioChannelLayoutTag_DTS_4_1 + (170U<<16) | 6, // kAudioChannelLayoutTag_DTS_6_0_A + (171U<<16) | 6, // kAudioChannelLayoutTag_DTS_6_0_B + (172U<<16) | 6, // kAudioChannelLayoutTag_DTS_6_0_C + (173U<<16) | 7, // kAudioChannelLayoutTag_DTS_6_1_A + (174U<<16) | 7, // kAudioChannelLayoutTag_DTS_6_1_B + (175U<<16) | 7, // kAudioChannelLayoutTag_DTS_6_1_C + (176U<<16) | 7, // kAudioChannelLayoutTag_DTS_7_0 + (177U<<16) | 8, // kAudioChannelLayoutTag_DTS_7_1 + (178U<<16) | 8, // kAudioChannelLayoutTag_DTS_8_0_A + (179U<<16) | 8, // kAudioChannelLayoutTag_DTS_8_0_B + (180U<<16) | 9, // kAudioChannelLayoutTag_DTS_8_1_A + (181U<<16) | 9, // kAudioChannelLayoutTag_DTS_8_1_B + (182U<<16) | 7, // kAudioChannelLayoutTag_DTS_6_1_D + (185U<<16) | 4, // kAudioChannelLayoutTag_WAVE_4_0_B + (186U<<16) | 5, // kAudioChannelLayoutTag_WAVE_5_0_B + (187U<<16) | 6, 
// kAudioChannelLayoutTag_WAVE_5_1_B + (188U<<16) | 7, // kAudioChannelLayoutTag_WAVE_6_1 + (189U<<16) | 8, // kAudioChannelLayoutTag_WAVE_7_1 + (194U<<16) | 8, // kAudioChannelLayoutTag_Atmos_5_1_2 + (195U<<16) | 10, // kAudioChannelLayoutTag_Atmos_5_1_4 + (196U<<16) | 10, // kAudioChannelLayoutTag_Atmos_7_1_2 + (192U<<16) | 12, // kAudioChannelLayoutTag_Atmos_7_1_4 + (193U<<16) | 16, // kAudioChannelLayoutTag_Atmos_9_1_6 + (197U<<16) | 4, // kAudioChannelLayoutTag_Logic_4_0_C + (198U<<16) | 6, // kAudioChannelLayoutTag_Logic_6_0_B + (199U<<16) | 7, // kAudioChannelLayoutTag_Logic_6_1_B + (200U<<16) | 7, // kAudioChannelLayoutTag_Logic_6_1_D + (201U<<16) | 8, // kAudioChannelLayoutTag_Logic_7_1_B + (202U<<16) | 12, // kAudioChannelLayoutTag_Logic_Atmos_7_1_4_B + (203U<<16) | 14, // kAudioChannelLayoutTag_Logic_Atmos_7_1_6 + (204U<<16) | 24, // kAudioChannelLayoutTag_CICP_13 + (205U<<16) | 8, // kAudioChannelLayoutTag_CICP_14 + (206U<<16) | 12, // kAudioChannelLayoutTag_CICP_15 + (207U<<16) | 10, // kAudioChannelLayoutTag_CICP_16 + (208U<<16) | 12, // kAudioChannelLayoutTag_CICP_17 + (209U<<16) | 14, // kAudioChannelLayoutTag_CICP_18 + (210U<<16) | 12, // kAudioChannelLayoutTag_CICP_19 + (211U<<16) | 14, // kAudioChannelLayoutTag_CICP_20 + kAudioChannelLayoutTag_Unknown +}; + int ca_label_to_mp_speaker_id(AudioChannelLabel label) { for (int i = 0; speaker_map[i][1] >= 0; i++) @@ -73,30 +187,48 @@ int ca_label_to_mp_speaker_id(AudioChannelLabel label) return -1; } +AudioChannelLabel mp_speaker_id_to_ca_label(int speaker_id) +{ + for (int i = 0; speaker_map[i][1] >= 0; i++) + if (speaker_map[i][1] == speaker_id) + return speaker_map[i][0]; + return -1; // kAudioChannelLabel_Unknown +} + #if HAVE_COREAUDIO -static void ca_log_layout(struct ao *ao, int l, AudioChannelLayout *layout) +void ca_log_layout(struct ao *ao, int l, AudioChannelLayout *layout) { if (!mp_msg_test(ao->log, l)) return; - AudioChannelDescription *descs = layout->mChannelDescriptions; - - 
mp_msg(ao->log, l, "layout: tag: <%u>, bitmap: <%u>, " - "descriptions <%u>\n", - (unsigned) layout->mChannelLayoutTag, - (unsigned) layout->mChannelBitmap, - (unsigned) layout->mNumberChannelDescriptions); - - for (int i = 0; i < layout->mNumberChannelDescriptions; i++) { - AudioChannelDescription d = descs[i]; - mp_msg(ao->log, l, " - description %d: label <%u, %u>, " - " flags: <%u>, coords: <%f, %f, %f>\n", i, - (unsigned) d.mChannelLabel, - (unsigned) ca_label_to_mp_speaker_id(d.mChannelLabel), - (unsigned) d.mChannelFlags, - d.mCoordinates[0], - d.mCoordinates[1], - d.mCoordinates[2]); + AudioChannelLayoutTag tag = layout->mChannelLayoutTag; + mp_msg(ao->log, l, "audio channel layout: tag: <%u>", tag); + + if (tag == kAudioChannelLayoutTag_UseChannelDescriptions) { + AudioChannelDescription *descs = layout->mChannelDescriptions; + mp_msg(ao->log, l, ", descriptions <%u>\n", + (unsigned) layout->mNumberChannelDescriptions); + + for (int i = 0; i < layout->mNumberChannelDescriptions; i++) { + AudioChannelDescription d = descs[i]; + mp_msg(ao->log, l, " - description %d: label <%u, %u>, flags: <%u>", + i, + (unsigned) d.mChannelLabel, + (unsigned) ca_label_to_mp_speaker_id(d.mChannelLabel), + (unsigned) d.mChannelFlags); + if (d.mChannelFlags != kAudioChannelFlags_AllOff) { + mp_msg(ao->log, l, ", coords: <%f, %f, %f>\n", + d.mCoordinates[0], + d.mCoordinates[1], + d.mCoordinates[2]); + } else { + mp_msg(ao->log, l, "\n"); + } + } + } else if (tag == kAudioChannelLayoutTag_UseChannelBitmap) { + mp_msg(ao->log, l, ", bitmap <%u>\n", layout->mChannelBitmap); + } else { + mp_msg(ao->log, l, "\n"); } } @@ -117,7 +249,7 @@ static AudioChannelLayout *ca_layout_to_custom_layout(struct ao *ao, kAudioFormatProperty_ChannelLayoutForBitmap, sizeof(uint32_t), &l->mChannelBitmap, &psize); CHECK_CA_ERROR("failed to convert channel bitmap to descriptions (info)"); - r = talloc_size(NULL, psize); + r = talloc_size(talloc_ctx, psize); err = AudioFormatGetProperty( 
kAudioFormatProperty_ChannelLayoutForBitmap, sizeof(uint32_t), &l->mChannelBitmap, &psize, r); @@ -127,7 +259,7 @@ static AudioChannelLayout *ca_layout_to_custom_layout(struct ao *ao, err = AudioFormatGetPropertyInfo( kAudioFormatProperty_ChannelLayoutForTag, sizeof(AudioChannelLayoutTag), &l->mChannelLayoutTag, &psize); - r = talloc_size(NULL, psize); + r = talloc_size(talloc_ctx, psize); CHECK_CA_ERROR("failed to convert channel tag to descriptions (info)"); err = AudioFormatGetProperty( kAudioFormatProperty_ChannelLayoutForTag, @@ -135,14 +267,53 @@ static AudioChannelLayout *ca_layout_to_custom_layout(struct ao *ao, CHECK_CA_ERROR("failed to convert channel tag to descriptions (get)"); } - MP_VERBOSE(ao, "converted input channel layout:\n"); - ca_log_layout(ao, MSGL_V, l); + if (ao) { + MP_VERBOSE(ao, "converted input channel layout:\n"); + ca_log_layout(ao, MSGL_V, l); + } return r; coreaudio_error: return NULL; } +AudioChannelLayout *ca_find_standard_layout(void *talloc_ctx, AudioChannelLayout *l) +{ + if (l->mChannelLayoutTag != kAudioChannelLayoutTag_UseChannelDescriptions) + return l; + + AudioChannelLayout *s = talloc_size(talloc_ctx, sizeof(AudioChannelLayout)); + + for (int i = 0; ; ++i) { + if ((s->mChannelLayoutTag = std_layouts[i]) == kAudioChannelLayoutTag_Unknown) { + s = NULL; + break; + } + + AudioChannelLayout *r = ca_layout_to_custom_layout(NULL, talloc_ctx, s); + + if (!r) + goto mismatch; + if (l->mNumberChannelDescriptions != r->mNumberChannelDescriptions) + goto mismatch; + + for (int i = 0; i < l->mNumberChannelDescriptions; ++i) { + AudioChannelDescription *ld = l->mChannelDescriptions + i; + AudioChannelDescription *rd = r->mChannelDescriptions + i; + if (ld->mChannelLabel == rd->mChannelLabel) + continue; + // XXX: we cannot handle channels with coordinates + goto mismatch; + } + + break; + +mismatch:; + } + + return s ? s : l; +} + #define CHMAP(n, ...) 
&(struct mp_chmap) MP_CONCAT(MP_CHMAP, n) (__VA_ARGS__) @@ -241,8 +412,8 @@ static AudioChannelLayout* ca_query_stereo_layout(struct ao *ao, void *talloc_ctx) { OSStatus err; - const int nch = 2; - uint32_t channels[nch]; + uint32_t channels[2]; + const int nch = MP_ARRAY_SIZE(channels); AudioChannelLayout *r = NULL; AudioObjectPropertyAddress p_addr = (AudioObjectPropertyAddress) { diff --git a/audio/out/ao_coreaudio_chmap.h b/audio/out/ao_coreaudio_chmap.h index b6d160c..0b21e83 100644 --- a/audio/out/ao_coreaudio_chmap.h +++ b/audio/out/ao_coreaudio_chmap.h @@ -18,15 +18,22 @@ #ifndef MPV_COREAUDIO_CHMAP_H #define MPV_COREAUDIO_CHMAP_H +#include "config.h" #include <AudioToolbox/AudioToolbox.h> -#include "config.h" +#if HAVE_AVFOUNDATION || HAVE_AUDIOUNIT +#undef HAVE_COREAUDIO +#define HAVE_COREAUDIO 1 +#endif struct mp_chmap; int ca_label_to_mp_speaker_id(AudioChannelLabel label); +AudioChannelLabel mp_speaker_id_to_ca_label(int speaker_id); #if HAVE_COREAUDIO +AudioChannelLayout *ca_find_standard_layout(void *talloc_ctx, AudioChannelLayout *l); +void ca_log_layout(struct ao *ao, int l, AudioChannelLayout *layout); bool ca_init_chmap(struct ao *ao, AudioDeviceID device); void ca_get_active_chmap(struct ao *ao, AudioDeviceID device, int channel_count, struct mp_chmap *out_map); diff --git a/audio/out/ao_coreaudio_exclusive.c b/audio/out/ao_coreaudio_exclusive.c index e24f791..5e0ec3b 100644 --- a/audio/out/ao_coreaudio_exclusive.c +++ b/audio/out/ao_coreaudio_exclusive.c @@ -1,5 +1,5 @@ /* - * CoreAudio audio output driver for Mac OS X + * CoreAudio audio output driver for macOS * * original copyright (C) Timothy J. Wood - Aug 2000 * ported to MPlayer libao2 by Dan Christiansen @@ -28,7 +28,7 @@ */ /* - * The MacOS X CoreAudio framework doesn't mesh as simply as some + * The macOS CoreAudio framework doesn't mesh as simply as some * simpler frameworks do. 
This is due to the fact that CoreAudio pulls * audio samples rather than having them pushed at it (which is nice * when you are wanting to do good buffering of audio). @@ -114,7 +114,7 @@ static OSStatus enable_property_listener(struct ao *ao, bool enabled) kAudioHardwarePropertyDevices}; AudioDeviceID devs[] = {p->device, kAudioObjectSystemObject}; - assert(MP_ARRAY_SIZE(selectors) == MP_ARRAY_SIZE(devs)); + static_assert(MP_ARRAY_SIZE(selectors) == MP_ARRAY_SIZE(devs), ""); OSStatus status = noErr; for (int n = 0; n < MP_ARRAY_SIZE(devs); n++) { diff --git a/audio/out/ao_coreaudio_properties.h b/audio/out/ao_coreaudio_properties.h index f293968..2c9c565 100644 --- a/audio/out/ao_coreaudio_properties.h +++ b/audio/out/ao_coreaudio_properties.h @@ -23,6 +23,11 @@ #include "internal.h" +#if HAVE_AVFOUNDATION || HAVE_AUDIOUNIT +#undef HAVE_COREAUDIO +#define HAVE_COREAUDIO 1 +#endif + // CoreAudio names are way too verbose #define ca_sel AudioObjectPropertySelector #define ca_scope AudioObjectPropertyScope diff --git a/audio/out/ao_coreaudio_utils.c b/audio/out/ao_coreaudio_utils.c index 14db8e3..e74092a 100644 --- a/audio/out/ao_coreaudio_utils.c +++ b/audio/out/ao_coreaudio_utils.c @@ -138,7 +138,8 @@ bool check_ca_st(struct ao *ao, int level, OSStatus code, const char *message) { if (code == noErr) return true; - mp_msg(ao->log, level, "%s (%s/%d)\n", message, mp_tag_str(code), (int)code); + if (ao) + mp_msg(ao->log, level, "%s (%s/%d)\n", message, mp_tag_str(code), (int)code); return false; } @@ -470,11 +471,9 @@ bool ca_change_physical_format_sync(struct ao *ao, AudioStreamID stream, ca_print_asbd(ao, "setting stream physical format:", &change_format); - sem_t wakeup; - if (mp_sem_init(&wakeup, 0, 0)) { - MP_WARN(ao, "OOM\n"); - return false; - } + mp_sem_t wakeup; + if (mp_sem_init(&wakeup, 0, 0)) + MP_HANDLE_OOM(0); AudioStreamBasicDescription prev_format; err = CA_GET(stream, kAudioStreamPropertyPhysicalFormat, &prev_format); diff --git 
a/audio/out/ao_coreaudio_utils.h b/audio/out/ao_coreaudio_utils.h index 0e2b8b1..699ffde 100644 --- a/audio/out/ao_coreaudio_utils.h +++ b/audio/out/ao_coreaudio_utils.h @@ -27,7 +27,12 @@ #include "common/msg.h" #include "audio/out/ao.h" #include "internal.h" -#include "osdep/apple_utils.h" +#include "osdep/utils-mac.h" + +#if HAVE_AVFOUNDATION || HAVE_AUDIOUNIT +#undef HAVE_COREAUDIO +#define HAVE_COREAUDIO 1 +#endif bool check_ca_st(struct ao *ao, int level, OSStatus code, const char *message); diff --git a/audio/out/ao_lavc.c b/audio/out/ao_lavc.c index 163fdca..4bae438 100644 --- a/audio/out/ao_lavc.c +++ b/audio/out/ao_lavc.c @@ -26,6 +26,7 @@ #include <limits.h> #include <libavutil/common.h> +#include <libavutil/samplefmt.h> #include "config.h" #include "options/options.h" diff --git a/audio/out/ao_null.c b/audio/out/ao_null.c index fcb61d2..0cda8d9 100644 --- a/audio/out/ao_null.c +++ b/audio/out/ao_null.c @@ -116,10 +116,11 @@ static void uninit(struct ao *ao) { } -// stop playing and empty buffers (for seeking/pause) +// stop playing and empty buffers (for seeking) static void reset(struct ao *ao) { struct priv *priv = ao->priv; + priv->paused = false; priv->buffered = 0; priv->playing = false; } diff --git a/audio/out/ao_oss.c b/audio/out/ao_oss.c index 5c0b8c9..afe5839 100644 --- a/audio/out/ao_oss.c +++ b/audio/out/ao_oss.c @@ -36,6 +36,7 @@ #include <sys/types.h> #include "audio/format.h" +#include "common/common.h" #include "common/msg.h" #include "options/options.h" #include "osdep/endian.h" @@ -89,7 +90,7 @@ static const int format_table[][2] = { #define MP_WARN_IOCTL_ERR(__ao) \ MP_WARN((__ao), "%s: ioctl() fail, err = %i: %s\n", \ - __FUNCTION__, errno, strerror(errno)) + __FUNCTION__, errno, mp_strerror(errno)) static void uninit(struct ao *ao); @@ -329,7 +330,7 @@ static bool audio_write(struct ao *ao, void **data, int samples) if (errno == EINTR) continue; MP_WARN(ao, "audio_write: write() fail, err = %i: %s.\n", - errno, strerror(errno)); + 
errno, mp_strerror(errno)); return false; } if ((size_t)rc != size) { diff --git a/audio/out/ao_pipewire.c b/audio/out/ao_pipewire.c index 3fbcbf6..94d393a 100644 --- a/audio/out/ao_pipewire.c +++ b/audio/out/ao_pipewire.c @@ -27,6 +27,7 @@ #include <spa/utils/result.h> #include <math.h> +#include "common/common.h" #include "common/msg.h" #include "options/m_config.h" #include "options/m_option.h" @@ -46,6 +47,15 @@ static inline int pw_stream_get_time_n(struct pw_stream *stream, struct pw_time #define spa_hook_remove(hook) if ((hook)->link.prev) spa_hook_remove(hook) #endif +#if !PW_CHECK_VERSION(1, 0, 4) +static uint64_t pw_stream_get_nsec(struct pw_stream *stream) +{ + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return SPA_TIMESPEC_TO_NSEC(&ts); +} +#endif + enum init_state { INIT_STATE_NONE, INIT_STATE_SUCCESS, @@ -86,7 +96,7 @@ struct id_list { struct spa_list node; }; -static enum spa_audio_format af_fmt_to_pw(struct ao *ao, enum af_format format) +static enum spa_audio_format af_fmt_to_pw(enum af_format format) { switch (format) { case AF_FORMAT_U8: return SPA_AUDIO_FORMAT_U8; @@ -99,9 +109,21 @@ static enum spa_audio_format af_fmt_to_pw(struct ao *ao, enum af_format format) case AF_FORMAT_S32P: return SPA_AUDIO_FORMAT_S32P; case AF_FORMAT_FLOATP: return SPA_AUDIO_FORMAT_F32P; case AF_FORMAT_DOUBLEP: return SPA_AUDIO_FORMAT_F64P; - default: - MP_WARN(ao, "Unhandled format %d\n", format); - return SPA_AUDIO_FORMAT_UNKNOWN; + default: return SPA_AUDIO_FORMAT_UNKNOWN; + } +} + +static enum spa_audio_iec958_codec af_fmt_to_codec(enum af_format format) +{ + switch (format) { + case AF_FORMAT_S_AAC: return SPA_AUDIO_IEC958_CODEC_MPEG2_AAC; + case AF_FORMAT_S_AC3: return SPA_AUDIO_IEC958_CODEC_AC3; + case AF_FORMAT_S_DTS: return SPA_AUDIO_IEC958_CODEC_DTS; + case AF_FORMAT_S_DTSHD: return SPA_AUDIO_IEC958_CODEC_DTSHD; + case AF_FORMAT_S_EAC3: return SPA_AUDIO_IEC958_CODEC_EAC3; + case AF_FORMAT_S_MP3: return SPA_AUDIO_IEC958_CODEC_MPEG; + case 
AF_FORMAT_S_TRUEHD: return SPA_AUDIO_IEC958_CODEC_TRUEHD; + default: return SPA_AUDIO_IEC958_CODEC_UNKNOWN; } } @@ -154,14 +176,13 @@ static void on_process(void *userdata) void *data[MP_NUM_CHANNELS]; if ((b = pw_stream_dequeue_buffer(p->stream)) == NULL) { - MP_WARN(ao, "out of buffers: %s\n", strerror(errno)); + MP_WARN(ao, "out of buffers: %s\n", mp_strerror(errno)); return; } struct spa_buffer *buf = b->buffer; - int bytes_per_channel = buf->datas[0].maxsize / ao->channels.num; - int nframes = bytes_per_channel / ao->sstride; + int nframes = buf->datas[0].maxsize / ao->sstride; #if PW_CHECK_VERSION(0, 3, 49) if (b->requested != 0) nframes = MPMIN(b->requested, nframes); @@ -177,9 +198,13 @@ static void on_process(void *userdata) time.rate.num = 1; int64_t end_time = mp_time_ns(); - /* time.queued is always going to be 0, so we don't need to care */ - end_time += (nframes * 1e9 / ao->samplerate) + - ((double) time.delay * SPA_NSEC_PER_SEC * time.rate.num / time.rate.denom); + end_time += MP_TIME_S_TO_NS(nframes) / ao->samplerate; + end_time += MP_TIME_S_TO_NS(time.delay) * time.rate.num / time.rate.denom; + end_time += MP_TIME_S_TO_NS(time.queued) / ao->samplerate; +#if PW_CHECK_VERSION(0, 3, 50) + end_time += MP_TIME_S_TO_NS(time.buffered) / ao->samplerate; +#endif + end_time -= pw_stream_get_nsec(p->stream) - time.now; int samples = ao_read_data_nonblocking(ao, data, nframes, end_time); b->size = samples; @@ -214,7 +239,7 @@ static void on_param_changed(void *userdata, uint32_t id, const struct spa_pod * if (param == NULL || id != SPA_PARAM_Format) return; - int buffer_size = ao->device_buffer * af_fmt_to_bytes(ao->format) * ao->channels.num; + int buffer_size = ao->device_buffer * ao->sstride; params[0] = spa_pod_builder_add_object(&b, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers, @@ -506,7 +531,7 @@ static int pipewire_init_boilerplate(struct ao *ao) if (!p->core) { MP_MSG(ao, ao->probing ? 
MSGL_V : MSGL_ERR, "Could not connect to context '%s': %s\n", - p->options.remote, strerror(errno)); + p->options.remote, mp_strerror(errno)); pw_context_destroy(context); goto error; } @@ -580,24 +605,41 @@ static int init(struct ao *ao) pw_properties_setf(props, PW_KEY_NODE_RATE, "1/%d", ao->samplerate); - enum spa_audio_format spa_format = af_fmt_to_pw(ao, ao->format); - if (spa_format == SPA_AUDIO_FORMAT_UNKNOWN) { - ao->format = AF_FORMAT_FLOATP; - spa_format = SPA_AUDIO_FORMAT_F32P; - } + if (af_fmt_is_spdif(ao->format)) { + enum spa_audio_iec958_codec spa_codec = af_fmt_to_codec(ao->format); + if (spa_codec == SPA_AUDIO_IEC958_CODEC_UNKNOWN) { + MP_ERR(ao, "Unhandled codec %d\n", ao->format); + goto error_props; + } - struct spa_audio_info_raw audio_info = { - .format = spa_format, - .rate = ao->samplerate, - .channels = ao->channels.num, - }; + struct spa_audio_info_iec958 audio_info = { + .codec = spa_codec, + .rate = ao->samplerate, + }; - for (int i = 0; i < ao->channels.num; i++) - audio_info.position[i] = mp_speaker_id_to_spa(ao, ao->channels.speaker[i]); + params[0] = spa_format_audio_iec958_build(&b, SPA_PARAM_EnumFormat, &audio_info); + if (!params[0]) + goto error_props; + } else { + enum spa_audio_format spa_format = af_fmt_to_pw(ao->format); + if (spa_format == SPA_AUDIO_FORMAT_UNKNOWN) { + MP_ERR(ao, "Unhandled format %d\n", ao->format); + goto error_props; + } - params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat, &audio_info); - if (!params[0]) - goto error_props; + struct spa_audio_info_raw audio_info = { + .format = spa_format, + .rate = ao->samplerate, + .channels = ao->channels.num, + }; + + for (int i = 0; i < ao->channels.num; i++) + audio_info.position[i] = mp_speaker_id_to_spa(ao, ao->channels.speaker[i]); + + params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat, &audio_info); + if (!params[0]) + goto error_props; + } if (af_fmt_is_planar(ao->format)) { ao->num_planes = ao->channels.num; @@ -664,6 +706,15 @@ 
static void start(struct ao *ao) pw_thread_loop_unlock(p->loop); } +static bool set_pause(struct ao *ao, bool paused) +{ + struct priv *p = ao->priv; + pw_thread_loop_lock(p->loop); + pw_stream_set_active(p->stream, !paused); + pw_thread_loop_unlock(p->loop); + return true; +} + #define CONTROL_RET(r) (!r ? CONTROL_OK : CONTROL_ERROR) static int control(struct ao *ao, enum aocontrol cmd, void *arg) @@ -855,7 +906,7 @@ const struct ao_driver audio_out_pipewire = { .uninit = uninit, .reset = reset, .start = start, - + .set_pause = set_pause, .control = control, .hotplug_init = hotplug_init, diff --git a/audio/out/ao_pulse.c b/audio/out/ao_pulse.c index 3b29b1a..5c86855 100644 --- a/audio/out/ao_pulse.c +++ b/audio/out/ao_pulse.c @@ -118,7 +118,7 @@ static void stream_request_cb(pa_stream *s, size_t length, void *userdata) { struct ao *ao = userdata; struct priv *priv = ao->priv; - ao_wakeup_playthread(ao); + ao_wakeup(ao); pa_threaded_mainloop_signal(priv->mainloop, 0); } @@ -135,7 +135,7 @@ static void underflow_cb(pa_stream *s, void *userdata) struct priv *priv = ao->priv; priv->playing = false; priv->underrun_signalled = true; - ao_wakeup_playthread(ao); + ao_wakeup(ao); pa_threaded_mainloop_signal(priv->mainloop, 0); } @@ -804,6 +804,7 @@ const struct ao_driver audio_out_pulse = { .priv_size = sizeof(struct priv), .priv_defaults = &(const struct priv) { .cfg_buffer = 100, + .cfg_latency_hacks = true, }, .options = (const struct m_option[]) { {"host", OPT_STRING(cfg_host)}, diff --git a/audio/out/ao_sndio.c b/audio/out/ao_sndio.c index fce7139..309ea4a 100644 --- a/audio/out/ao_sndio.c +++ b/audio/out/ao_sndio.c @@ -22,6 +22,8 @@ #include <errno.h> #include <sndio.h> +#include "config.h" + #include "options/m_option.h" #include "common/msg.h" @@ -292,7 +294,7 @@ static void get_state(struct ao *ao, struct mp_pcm_state *state) state->delay = p->delay / (double)p->par.rate; /* report unexpected EOF / underrun */ - if ((state->queued_samples && state->queued_samples 
&& + if ((state->queued_samples && (state->queued_samples < state->free_samples) && p->playing) || sio_eof(p->hdl)) { @@ -301,7 +303,7 @@ static void get_state(struct ao *ao, struct mp_pcm_state *state) state->free_samples, state->queued_samples, state->delay); p->playing = false; state->playing = p->playing; - ao_wakeup_playthread(ao); + ao_wakeup(ao); } else { state->playing = p->playing; } diff --git a/audio/out/ao_wasapi.c b/audio/out/ao_wasapi.c index b201f26..d986d80 100644 --- a/audio/out/ao_wasapi.c +++ b/audio/out/ao_wasapi.c @@ -150,14 +150,32 @@ exit_label: return false; } +static void thread_pause(struct ao *ao) +{ + struct wasapi_state *state = ao->priv; + MP_DBG(state, "Thread Pause\n"); + HRESULT hr = IAudioClient_Stop(state->pAudioClient); + if (FAILED(hr)) + MP_ERR(state, "IAudioClient_Stop returned: %s\n", mp_HRESULT_to_str(hr)); +} + +static void thread_unpause(struct ao *ao) +{ + struct wasapi_state *state = ao->priv; + MP_DBG(state, "Thread Unpause\n"); + HRESULT hr = IAudioClient_Start(state->pAudioClient); + if (FAILED(hr)) { + MP_ERR(state, "IAudioClient_Start returned %s\n", + mp_HRESULT_to_str(hr)); + } +} + static void thread_reset(struct ao *ao) { struct wasapi_state *state = ao->priv; HRESULT hr; MP_DBG(state, "Thread Reset\n"); - hr = IAudioClient_Stop(state->pAudioClient); - if (FAILED(hr)) - MP_ERR(state, "IAudioClient_Stop returned: %s\n", mp_HRESULT_to_str(hr)); + thread_pause(ao); hr = IAudioClient_Reset(state->pAudioClient); if (FAILED(hr)) @@ -172,27 +190,20 @@ static void thread_resume(struct ao *ao) MP_DBG(state, "Thread Resume\n"); thread_reset(ao); thread_feed(ao); - - HRESULT hr = IAudioClient_Start(state->pAudioClient); - if (FAILED(hr)) { - MP_ERR(state, "IAudioClient_Start returned %s\n", - mp_HRESULT_to_str(hr)); - } + thread_unpause(ao); } -static void thread_wakeup(void *ptr) +static void set_state_and_wakeup_thread(struct ao *ao, + enum wasapi_thread_state thread_state) { - struct ao *ao = ptr; struct wasapi_state 
*state = ao->priv; + atomic_store(&state->thread_state, thread_state); SetEvent(state->hWake); } -static void set_thread_state(struct ao *ao, - enum wasapi_thread_state thread_state) +static void thread_process_dispatch(void *ptr) { - struct wasapi_state *state = ao->priv; - atomic_store(&state->thread_state, thread_state); - thread_wakeup(ao); + set_state_and_wakeup_thread(ptr, WASAPI_THREAD_DISPATCH); } static DWORD __stdcall AudioThread(void *lpParameter) @@ -212,8 +223,6 @@ static DWORD __stdcall AudioThread(void *lpParameter) if (WaitForSingleObject(state->hWake, INFINITE) != WAIT_OBJECT_0) MP_ERR(ao, "Unexpected return value from WaitForSingleObject\n"); - mp_dispatch_queue_process(state->dispatch, 0); - int thread_state = atomic_load(&state->thread_state); switch (thread_state) { case WASAPI_THREAD_FEED: @@ -221,6 +230,9 @@ static DWORD __stdcall AudioThread(void *lpParameter) if (thread_feed(ao) && thread_feed(ao)) MP_ERR(ao, "Unable to fill buffer fast enough\n"); break; + case WASAPI_THREAD_DISPATCH: + mp_dispatch_queue_process(state->dispatch, 0); + break; case WASAPI_THREAD_RESET: thread_reset(ao); break; @@ -230,6 +242,12 @@ static DWORD __stdcall AudioThread(void *lpParameter) case WASAPI_THREAD_SHUTDOWN: thread_reset(ao); goto exit_label; + case WASAPI_THREAD_PAUSE: + thread_pause(ao); + break; + case WASAPI_THREAD_UNPAUSE: + thread_unpause(ao); + break; default: MP_ERR(ao, "Unhandled thread state: %d\n", thread_state); } @@ -250,7 +268,7 @@ static void uninit(struct ao *ao) MP_DBG(ao, "Uninit wasapi\n"); struct wasapi_state *state = ao->priv; if (state->hWake) - set_thread_state(ao, WASAPI_THREAD_SHUTDOWN); + set_state_and_wakeup_thread(ao, WASAPI_THREAD_SHUTDOWN); if (state->hAudioThread && WaitForSingleObject(state->hAudioThread, INFINITE) != WAIT_OBJECT_0) @@ -301,7 +319,7 @@ static int init(struct ao *ao) } state->dispatch = mp_dispatch_create(state); - mp_dispatch_set_wakeup_fn(state->dispatch, thread_wakeup, ao); + 
mp_dispatch_set_wakeup_fn(state->dispatch, thread_process_dispatch, ao); state->init_ok = false; state->hAudioThread = CreateThread(NULL, 0, &AudioThread, ao, 0, NULL); @@ -349,7 +367,7 @@ static int thread_control_exclusive(struct ao *ao, enum aocontrol cmd, void *arg case AOCONTROL_GET_VOLUME: IAudioEndpointVolume_GetMasterVolumeLevelScalar( state->pEndpointVolume, &volume); - *(float *)arg = volume; + *(float *)arg = volume * 100.f; return CONTROL_OK; case AOCONTROL_SET_VOLUME: volume = (*(float *)arg) / 100.f; @@ -379,7 +397,7 @@ static int thread_control_shared(struct ao *ao, enum aocontrol cmd, void *arg) switch(cmd) { case AOCONTROL_GET_VOLUME: ISimpleAudioVolume_GetMasterVolume(state->pAudioVolume, &volume); - *(float *)arg = volume; + *(float *)arg = volume * 100.f; return CONTROL_OK; case AOCONTROL_SET_VOLUME: volume = (*(float *)arg) / 100.f; @@ -456,12 +474,18 @@ static int control(struct ao *ao, enum aocontrol cmd, void *arg) static void audio_reset(struct ao *ao) { - set_thread_state(ao, WASAPI_THREAD_RESET); + set_state_and_wakeup_thread(ao, WASAPI_THREAD_RESET); } static void audio_resume(struct ao *ao) { - set_thread_state(ao, WASAPI_THREAD_RESUME); + set_state_and_wakeup_thread(ao, WASAPI_THREAD_RESUME); +} + +static bool audio_set_pause(struct ao *ao, bool paused) +{ + set_state_and_wakeup_thread(ao, paused ? 
WASAPI_THREAD_PAUSE : WASAPI_THREAD_UNPAUSE); + return true; } static void hotplug_uninit(struct ao *ao) @@ -497,6 +521,7 @@ const struct ao_driver audio_out_wasapi = { .control = control, .reset = audio_reset, .start = audio_resume, + .set_pause = audio_set_pause, .list_devs = wasapi_list_devs, .hotplug_init = hotplug_init, .hotplug_uninit = hotplug_uninit, diff --git a/audio/out/ao_wasapi.h b/audio/out/ao_wasapi.h index 17b8f7a..4e5e9c8 100644 --- a/audio/out/ao_wasapi.h +++ b/audio/out/ao_wasapi.h @@ -48,9 +48,12 @@ void wasapi_change_uninit(struct ao* ao); enum wasapi_thread_state { WASAPI_THREAD_FEED = 0, + WASAPI_THREAD_DISPATCH, WASAPI_THREAD_RESUME, WASAPI_THREAD_RESET, - WASAPI_THREAD_SHUTDOWN + WASAPI_THREAD_SHUTDOWN, + WASAPI_THREAD_PAUSE, + WASAPI_THREAD_UNPAUSE, }; typedef struct wasapi_state { diff --git a/audio/out/ao_wasapi_utils.c b/audio/out/ao_wasapi_utils.c index 731fe8a..7e85f75 100644 --- a/audio/out/ao_wasapi_utils.c +++ b/audio/out/ao_wasapi_utils.c @@ -18,12 +18,15 @@ */ #include <math.h> -#include <wchar.h> + #include <windows.h> -#include <errors.h> +#include <mmsystem.h> +#include <mmreg.h> #include <ksguid.h> #include <ksmedia.h> #include <avrt.h> +#include <propsys.h> +#include <functiondiscoverykeys_devpkey.h> #include "audio/format.h" #include "osdep/timer.h" @@ -31,36 +34,47 @@ #include "osdep/strnlen.h" #include "ao_wasapi.h" -DEFINE_PROPERTYKEY(mp_PKEY_Device_FriendlyName, - 0xa45c254e, 0xdf1c, 0x4efd, 0x80, 0x20, - 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0, 14); -DEFINE_PROPERTYKEY(mp_PKEY_Device_DeviceDesc, - 0xa45c254e, 0xdf1c, 0x4efd, 0x80, 0x20, - 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0, 2); -// CEA 861 subformats -// should work on vista -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_DTS, - 0x00000008, 0x0000, 0x0010, 0x80, 0x00, +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_DTS +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_DTS, + WAVE_FORMAT_DTS, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); 
-DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL, - 0x00000092, 0x0000, 0x0010, 0x80, 0x00, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL, + WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); -// might require 7+ -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_AAC, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_AAC +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_AAC, 0x00000006, 0x0cea, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_MPEG3, - 0x00000004, 0x0cea, 0x0010, 0x80, 0x00, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_MPEG3 +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_MPEG3, + 0x00000005, 0x0cea, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS, 0x0000000a, 0x0cea, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD, 0x0000000b, 0x0cea, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); -DEFINE_GUID(mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP, +#endif + +#ifndef KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP +DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP, 0x0000000c, 0x0cea, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71); +#endif struct wasapi_sample_fmt { int mp_format; // AF_FORMAT_* @@ -82,13 +96,13 @@ static const struct wasapi_sample_fmt wasapi_formats[] = { // aka S24 (with conversion on output) {AF_FORMAT_S32, 24, 24, &KSDATAFORMAT_SUBTYPE_PCM}, {AF_FORMAT_FLOAT, 32, 32, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT}, - {AF_FORMAT_S_AC3, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL}, - 
{AF_FORMAT_S_DTS, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_DTS}, - {AF_FORMAT_S_AAC, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_AAC}, - {AF_FORMAT_S_MP3, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_MPEG3}, - {AF_FORMAT_S_TRUEHD, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP}, - {AF_FORMAT_S_EAC3, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS}, - {AF_FORMAT_S_DTSHD, 16, 16, &mp_KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD}, + {AF_FORMAT_S_AC3, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL}, + {AF_FORMAT_S_DTS, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_DTS}, + {AF_FORMAT_S_AAC, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_AAC}, + {AF_FORMAT_S_MP3, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_MPEG3}, + {AF_FORMAT_S_TRUEHD, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP}, + {AF_FORMAT_S_EAC3, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS}, + {AF_FORMAT_S_DTSHD, 16, 16, &KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD}, {0}, }; @@ -562,9 +576,10 @@ static void init_session_display(struct wasapi_state *state, const char *name) { (void **)&state->pSessionControl); EXIT_ON_ERROR(hr); - wchar_t path[MAX_PATH] = {0}; - GetModuleFileNameW(NULL, path, MAX_PATH); + wchar_t *path = talloc_array(NULL, wchar_t, MP_PATH_MAX); + GetModuleFileNameW(NULL, path, MP_PATH_MAX); hr = IAudioSessionControl_SetIconPath(state->pSessionControl, path, NULL); + talloc_free(path); if (FAILED(hr)) { // don't goto exit_label here since SetDisplayName might still work MP_WARN(state, "Error setting audio session icon: %s\n", @@ -718,7 +733,7 @@ static char* get_device_name(struct mp_log *l, void *talloc_ctx, IMMDevice *pDev HRESULT hr = IMMDevice_OpenPropertyStore(pDevice, STGM_READ, &pProps); EXIT_ON_ERROR(hr); - hr = IPropertyStore_GetValue(pProps, &mp_PKEY_Device_FriendlyName, + hr = IPropertyStore_GetValue(pProps, &PKEY_Device_FriendlyName, &devname); EXIT_ON_ERROR(hr); diff --git a/audio/out/buffer.c b/audio/out/buffer.c index 5b8b523..97f7ea1 100644 --- a/audio/out/buffer.c +++ 
b/audio/out/buffer.c @@ -41,7 +41,7 @@ struct buffer_state { mp_mutex lock; mp_cond wakeup; - // Playthread sleep + // AO thread sleep mp_mutex pt_lock; mp_cond pt_wakeup; @@ -62,6 +62,11 @@ struct buffer_state { bool paused; // logically paused int64_t end_time_ns; // absolute output time of last played sample + int64_t queued_time_ns; // duration of samples that have been queued to + // the device but have not been played. + // This field is only set in ao_set_paused(), + // and is considered as a temporary solution; + // DO NOT USE IT IN OTHER PLACES. bool initial_unblocked; @@ -78,9 +83,9 @@ struct buffer_state { bool terminate; // exit thread }; -static MP_THREAD_VOID playthread(void *arg); +static MP_THREAD_VOID ao_thread(void *arg); -void ao_wakeup_playthread(struct ao *ao) +void ao_wakeup(struct ao *ao) { struct buffer_state *p = ao->buffer_state; mp_mutex_lock(&p->pt_lock); @@ -173,8 +178,8 @@ static int read_buffer(struct ao *ao, void **data, int samples, bool *eof, return pos; } -static int ao_read_data_unlocked(struct ao *ao, void **data, int samples, - int64_t out_time_ns, bool pad_silence) +static int ao_read_data_locked(struct ao *ao, void **data, int samples, + int64_t out_time_ns, bool pad_silence) { struct buffer_state *p = ao->buffer_state; assert(!ao->driver->write); @@ -208,7 +213,7 @@ int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_ns) mp_mutex_lock(&p->lock); - int pos = ao_read_data_unlocked(ao, data, samples, out_time_ns, true); + int pos = ao_read_data_locked(ao, data, samples, out_time_ns, true); mp_mutex_unlock(&p->lock); @@ -224,7 +229,7 @@ int ao_read_data_nonblocking(struct ao *ao, void **data, int samples, int64_t ou if (mp_mutex_trylock(&p->lock)) return 0; - int pos = ao_read_data_unlocked(ao, data, samples, out_time_ns, false); + int pos = ao_read_data_locked(ao, data, samples, out_time_ns, false); mp_mutex_unlock(&p->lock); @@ -347,7 +352,7 @@ void ao_reset(struct ao *ao) ao->driver->reset(ao); if 
(wakeup) - ao_wakeup_playthread(ao); + ao_wakeup(ao); } // Initiate playback. This moves from the stop/underrun state to actually @@ -374,14 +379,14 @@ void ao_start(struct ao *ao) if (do_start) ao->driver->start(ao); - ao_wakeup_playthread(ao); + ao_wakeup(ao); } void ao_set_paused(struct ao *ao, bool paused, bool eof) { struct buffer_state *p = ao->buffer_state; bool wakeup = false; - bool do_reset = false, do_start = false; + bool do_change_state = false; // If we are going to pause on eof and ao is still playing, // be sure to drain the ao first for gapless. @@ -402,9 +407,9 @@ void ao_set_paused(struct ao *ao, bool paused, bool eof) p->streaming = false; p->recover_pause = !ao->untimed; } - } else if (ao->driver->reset) { + } else if (ao->driver->reset || ao->driver->set_pause) { // See ao_reset() why this is done outside of the lock. - do_reset = true; + do_change_state = true; p->streaming = false; } } @@ -416,7 +421,7 @@ void ao_set_paused(struct ao *ao, bool paused, bool eof) p->hw_paused = false; } else { if (!p->streaming) - do_start = true; + do_change_state = true; p->streaming = true; } wakeup = true; @@ -425,13 +430,25 @@ void ao_set_paused(struct ao *ao, bool paused, bool eof) mp_mutex_unlock(&p->lock); - if (do_reset) - ao->driver->reset(ao); - if (do_start) - ao->driver->start(ao); + if (do_change_state) { + if (ao->driver->set_pause) { + if (paused) { + ao->driver->set_pause(ao, true); + p->queued_time_ns = p->end_time_ns - mp_time_ns(); + } else { + p->end_time_ns = p->queued_time_ns + mp_time_ns(); + ao->driver->set_pause(ao, false); + } + } else { + if (paused) + ao->driver->reset(ao); + else + ao->driver->start(ao); + } + } if (wakeup) - ao_wakeup_playthread(ao); + ao_wakeup(ao); } // Whether audio is playing. 
This means that there is still data in the buffers, @@ -486,7 +503,7 @@ void ao_drain(struct ao *ao) static void wakeup_filters(void *ctx) { struct ao *ao = ctx; - ao_wakeup_playthread(ao); + ao_wakeup(ao); } void ao_uninit(struct ao *ao) @@ -561,7 +578,7 @@ bool init_buffer_post(struct ao *ao) mp_filter_graph_set_wakeup_cb(p->filter_root, wakeup_filters, ao); p->thread_valid = true; - if (mp_thread_create(&p->thread, playthread, ao)) { + if (mp_thread_create(&p->thread, ao_thread, ao)) { p->thread_valid = false; return false; } @@ -684,7 +701,7 @@ eof: return true; } -static MP_THREAD_VOID playthread(void *arg) +static MP_THREAD_VOID ao_thread(void *arg) { struct ao *ao = arg; struct buffer_state *p = ao->buffer_state; @@ -731,6 +748,6 @@ void ao_unblock(struct ao *ao) mp_mutex_lock(&p->lock); p->initial_unblocked = true; mp_mutex_unlock(&p->lock); - ao_wakeup_playthread(ao); + ao_wakeup(ao); } } diff --git a/audio/out/internal.h b/audio/out/internal.h index 7951b38..51429b9 100644 --- a/audio/out/internal.h +++ b/audio/out/internal.h @@ -108,6 +108,7 @@ struct mp_pcm_state { * start * Optional for both types: * control + * set_pause * a) ->write is called to queue audio. push.c creates a thread to regularly * refill audio device buffers with ->write, but all driver functions are * always called under an exclusive lock. @@ -115,8 +116,6 @@ struct mp_pcm_state { * reset * write * get_state - * Optional: - * set_pause * b) ->write must be NULL. ->start must be provided, and should make the * audio API start calling the audio callback. Your audio callback should * in turn call ao_read_data() to get audio data. Most functions are @@ -149,6 +148,9 @@ struct ao_driver { // Stop all audio playback, clear buffers, back to state after init(). // Optional for pull AOs. void (*reset)(struct ao *ao); + // pull based: set pause state. Only called after start() and before reset(). + // The return value is ignored. + // The pausing state is also cleared by reset(). 
// push based: set pause state. Only called after start() and before reset(). // returns success (this is intended for paused=true; if it // returns false, playback continues, and the core emulates via @@ -157,7 +159,7 @@ struct ao_driver { bool (*set_pause)(struct ao *ao, bool paused); // pull based: start the audio callback // push based: start playing queued data - // AO should call ao_wakeup_playthread() if a period boundary + // AO should call ao_wakeup() if a period boundary // is crossed, or playback stops due to external reasons // (including underruns or device removal) // must set mp_pcm_state.playing; unset on error/underrun/end @@ -229,7 +231,7 @@ bool ao_can_convert_inplace(struct ao_convert_fmt *fmt); bool ao_need_conversion(struct ao_convert_fmt *fmt); void ao_convert_inplace(struct ao_convert_fmt *fmt, void **data, int num_samples); -void ao_wakeup_playthread(struct ao *ao); +void ao_wakeup(struct ao *ao); int ao_read_data_converted(struct ao *ao, struct ao_convert_fmt *fmt, void **data, int samples, int64_t out_time_ns); |